summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--src/mongo/client/examples/mongoperf.cpp14
-rw-r--r--src/mongo/client/remote_command_targeter.h2
-rw-r--r--src/mongo/client/remote_command_targeter_factory_mock.cpp4
-rw-r--r--src/mongo/client/remote_command_targeter_mock.cpp2
-rw-r--r--src/mongo/client/remote_command_targeter_mock.h2
-rw-r--r--src/mongo/client/remote_command_targeter_rs.cpp6
-rw-r--r--src/mongo/client/remote_command_targeter_rs.h2
-rw-r--r--src/mongo/client/remote_command_targeter_standalone.cpp2
-rw-r--r--src/mongo/client/remote_command_targeter_standalone.h2
-rw-r--r--src/mongo/db/assemble_response.cpp146
-rw-r--r--src/mongo/db/assemble_response.h2
-rw-r--r--src/mongo/db/audit.cpp2
-rw-r--r--src/mongo/db/audit.h2
-rw-r--r--src/mongo/db/auth/auth_index_d.cpp14
-rw-r--r--src/mongo/db/auth/auth_index_d.h4
-rw-r--r--src/mongo/db/auth/authorization_manager.cpp42
-rw-r--r--src/mongo/db/auth/authorization_manager.h20
-rw-r--r--src/mongo/db/auth/authorization_manager_global.cpp6
-rw-r--r--src/mongo/db/auth/authorization_manager_test.cpp62
-rw-r--r--src/mongo/db/auth/authorization_session.cpp25
-rw-r--r--src/mongo/db/auth/authorization_session.h12
-rw-r--r--src/mongo/db/auth/authorization_session_test.cpp62
-rw-r--r--src/mongo/db/auth/authz_manager_external_state.cpp6
-rw-r--r--src/mongo/db/auth/authz_manager_external_state.h18
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_d.cpp10
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_d.h4
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_local.cpp51
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_local.h29
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_mock.cpp26
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_mock.h14
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_s.cpp50
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_s.h14
-rw-r--r--src/mongo/db/auth/authz_session_external_state.h2
-rw-r--r--src/mongo/db/auth/authz_session_external_state_d.cpp6
-rw-r--r--src/mongo/db/auth/authz_session_external_state_d.h2
-rw-r--r--src/mongo/db/auth/authz_session_external_state_mock.h2
-rw-r--r--src/mongo/db/auth/authz_session_external_state_s.cpp4
-rw-r--r--src/mongo/db/auth/authz_session_external_state_s.h2
-rw-r--r--src/mongo/db/auth/authz_session_external_state_server_common.cpp4
-rw-r--r--src/mongo/db/auth/authz_session_external_state_server_common.h2
-rw-r--r--src/mongo/db/auth/impersonation_session.cpp11
-rw-r--r--src/mongo/db/auth/impersonation_session.h4
-rw-r--r--src/mongo/db/auth/role_graph.h2
-rw-r--r--src/mongo/db/auth/role_graph_update.cpp8
-rw-r--r--src/mongo/db/auth/sasl_authentication_session.cpp2
-rw-r--r--src/mongo/db/auth/sasl_authentication_session.h8
-rw-r--r--src/mongo/db/auth/sasl_commands.cpp12
-rw-r--r--src/mongo/db/auth/sasl_scramsha1_test.cpp24
-rw-r--r--src/mongo/db/auth/user_cache_invalidator_job.cpp14
-rw-r--r--src/mongo/db/auth/user_cache_invalidator_job.h2
-rw-r--r--src/mongo/db/catalog/apply_ops.cpp72
-rw-r--r--src/mongo/db/catalog/apply_ops.h2
-rw-r--r--src/mongo/db/catalog/capped_utils.cpp86
-rw-r--r--src/mongo/db/catalog/capped_utils.h6
-rw-r--r--src/mongo/db/catalog/coll_mod.cpp46
-rw-r--r--src/mongo/db/catalog/coll_mod.h4
-rw-r--r--src/mongo/db/catalog/collection.cpp284
-rw-r--r--src/mongo/db/catalog/collection.h62
-rw-r--r--src/mongo/db/catalog/collection_catalog_entry.h32
-rw-r--r--src/mongo/db/catalog/collection_compact.cpp28
-rw-r--r--src/mongo/db/catalog/collection_info_cache.cpp46
-rw-r--r--src/mongo/db/catalog/collection_info_cache.h16
-rw-r--r--src/mongo/db/catalog/create_collection.cpp18
-rw-r--r--src/mongo/db/catalog/create_collection.h2
-rw-r--r--src/mongo/db/catalog/cursor_manager.cpp53
-rw-r--r--src/mongo/db/catalog/cursor_manager.h12
-rw-r--r--src/mongo/db/catalog/database.cpp158
-rw-r--r--src/mongo/db/catalog/database.h38
-rw-r--r--src/mongo/db/catalog/database_catalog_entry.h6
-rw-r--r--src/mongo/db/catalog/database_holder.cpp28
-rw-r--r--src/mongo/db/catalog/database_holder.h8
-rw-r--r--src/mongo/db/catalog/document_validation.h14
-rw-r--r--src/mongo/db/catalog/drop_collection.cpp25
-rw-r--r--src/mongo/db/catalog/drop_collection.h2
-rw-r--r--src/mongo/db/catalog/drop_database.cpp26
-rw-r--r--src/mongo/db/catalog/drop_database.h2
-rw-r--r--src/mongo/db/catalog/drop_indexes.cpp37
-rw-r--r--src/mongo/db/catalog/drop_indexes.h2
-rw-r--r--src/mongo/db/catalog/head_manager.h4
-rw-r--r--src/mongo/db/catalog/index_catalog.cpp296
-rw-r--r--src/mongo/db/catalog/index_catalog.h87
-rw-r--r--src/mongo/db/catalog/index_catalog_entry.cpp71
-rw-r--r--src/mongo/db/catalog/index_catalog_entry.h18
-rw-r--r--src/mongo/db/catalog/index_create.cpp63
-rw-r--r--src/mongo/db/catalog/index_create.h4
-rw-r--r--src/mongo/db/catalog/index_key_validate.cpp4
-rw-r--r--src/mongo/db/catalog/index_key_validate.h2
-rw-r--r--src/mongo/db/catalog/index_spec_validate_test.cpp12
-rw-r--r--src/mongo/db/catalog/rename_collection.cpp86
-rw-r--r--src/mongo/db/catalog/rename_collection.h2
-rw-r--r--src/mongo/db/client.cpp10
-rw-r--r--src/mongo/db/client.h8
-rw-r--r--src/mongo/db/clientcursor.cpp10
-rw-r--r--src/mongo/db/clientcursor.h2
-rw-r--r--src/mongo/db/clientlistplugin.cpp14
-rw-r--r--src/mongo/db/cloner.cpp144
-rw-r--r--src/mongo/db/cloner.h10
-rw-r--r--src/mongo/db/commands.cpp52
-rw-r--r--src/mongo/db/commands.h33
-rw-r--r--src/mongo/db/commands/apply_ops_cmd.cpp14
-rw-r--r--src/mongo/db/commands/apply_ops_cmd_common.cpp20
-rw-r--r--src/mongo/db/commands/apply_ops_cmd_common.h2
-rw-r--r--src/mongo/db/commands/authentication_commands.cpp26
-rw-r--r--src/mongo/db/commands/authentication_commands.h8
-rw-r--r--src/mongo/db/commands/clone.cpp10
-rw-r--r--src/mongo/db/commands/clone_collection.cpp8
-rw-r--r--src/mongo/db/commands/collection_to_capped.cpp14
-rw-r--r--src/mongo/db/commands/compact.cpp12
-rw-r--r--src/mongo/db/commands/conn_pool_stats.cpp6
-rw-r--r--src/mongo/db/commands/conn_pool_sync.cpp2
-rw-r--r--src/mongo/db/commands/connection_status.cpp2
-rw-r--r--src/mongo/db/commands/copydb.cpp18
-rw-r--r--src/mongo/db/commands/copydb_start_commands.cpp8
-rw-r--r--src/mongo/db/commands/count_cmd.cpp22
-rw-r--r--src/mongo/db/commands/cpuprofile.cpp20
-rw-r--r--src/mongo/db/commands/create_indexes.cpp70
-rw-r--r--src/mongo/db/commands/current_op.cpp8
-rw-r--r--src/mongo/db/commands/dbcommands.cpp263
-rw-r--r--src/mongo/db/commands/dbhash.cpp16
-rw-r--r--src/mongo/db/commands/dbhash.h2
-rw-r--r--src/mongo/db/commands/distinct.cpp32
-rw-r--r--src/mongo/db/commands/driverHelpers.cpp2
-rw-r--r--src/mongo/db/commands/drop_indexes.cpp32
-rw-r--r--src/mongo/db/commands/eval.cpp18
-rw-r--r--src/mongo/db/commands/explain_cmd.cpp20
-rw-r--r--src/mongo/db/commands/fail_point_cmd.cpp2
-rw-r--r--src/mongo/db/commands/feature_compatibility_version.cpp35
-rw-r--r--src/mongo/db/commands/feature_compatibility_version.h5
-rw-r--r--src/mongo/db/commands/find_and_modify.cpp122
-rw-r--r--src/mongo/db/commands/find_cmd.cpp52
-rw-r--r--src/mongo/db/commands/fsync.cpp40
-rw-r--r--src/mongo/db/commands/generic.cpp18
-rw-r--r--src/mongo/db/commands/geo_near_cmd.cpp25
-rw-r--r--src/mongo/db/commands/get_last_error.cpp22
-rw-r--r--src/mongo/db/commands/getmore_cmd.cpp66
-rw-r--r--src/mongo/db/commands/group_cmd.cpp18
-rw-r--r--src/mongo/db/commands/hashcmd.cpp2
-rw-r--r--src/mongo/db/commands/haystack.cpp8
-rw-r--r--src/mongo/db/commands/index_filter_commands.cpp40
-rw-r--r--src/mongo/db/commands/index_filter_commands.h14
-rw-r--r--src/mongo/db/commands/index_filter_commands_test.cpp105
-rw-r--r--src/mongo/db/commands/isself.cpp2
-rw-r--r--src/mongo/db/commands/kill_op.cpp6
-rw-r--r--src/mongo/db/commands/killcursors_cmd.cpp16
-rw-r--r--src/mongo/db/commands/killcursors_common.cpp6
-rw-r--r--src/mongo/db/commands/killcursors_common.h6
-rw-r--r--src/mongo/db/commands/list_collections.cpp32
-rw-r--r--src/mongo/db/commands/list_databases.cpp14
-rw-r--r--src/mongo/db/commands/list_indexes.cpp18
-rw-r--r--src/mongo/db/commands/lock_info.cpp4
-rw-r--r--src/mongo/db/commands/mr.cpp278
-rw-r--r--src/mongo/db/commands/mr.h12
-rw-r--r--src/mongo/db/commands/oplog_note.cpp10
-rw-r--r--src/mongo/db/commands/parallel_collection_scan.cpp14
-rw-r--r--src/mongo/db/commands/parameters.cpp18
-rw-r--r--src/mongo/db/commands/pipeline_command.cpp71
-rw-r--r--src/mongo/db/commands/plan_cache_commands.cpp42
-rw-r--r--src/mongo/db/commands/plan_cache_commands.h16
-rw-r--r--src/mongo/db/commands/plan_cache_commands_test.cpp109
-rw-r--r--src/mongo/db/commands/rename_collection_cmd.cpp10
-rw-r--r--src/mongo/db/commands/repair_cursor.cpp12
-rw-r--r--src/mongo/db/commands/server_status.cpp22
-rw-r--r--src/mongo/db/commands/server_status.h10
-rw-r--r--src/mongo/db/commands/set_feature_compatibility_version_command.cpp4
-rw-r--r--src/mongo/db/commands/snapshot_management.cpp19
-rw-r--r--src/mongo/db/commands/test_commands.cpp54
-rw-r--r--src/mongo/db/commands/top_command.cpp4
-rw-r--r--src/mongo/db/commands/touch.cpp6
-rw-r--r--src/mongo/db/commands/user_management_commands.cpp330
-rw-r--r--src/mongo/db/commands/validate.cpp10
-rw-r--r--src/mongo/db/commands/write_commands/write_commands.cpp64
-rw-r--r--src/mongo/db/commands_helpers.h4
-rw-r--r--src/mongo/db/concurrency/d_concurrency.h2
-rw-r--r--src/mongo/db/db.cpp84
-rw-r--r--src/mongo/db/db_raii.cpp98
-rw-r--r--src/mongo/db/db_raii.h35
-rw-r--r--src/mongo/db/dbdirectclient.cpp34
-rw-r--r--src/mongo/db/dbdirectclient.h6
-rw-r--r--src/mongo/db/dbhelpers.cpp113
-rw-r--r--src/mongo/db/dbhelpers.h22
-rw-r--r--src/mongo/db/dbwebserver.cpp38
-rw-r--r--src/mongo/db/dbwebserver.h8
-rw-r--r--src/mongo/db/exec/and_hash.cpp8
-rw-r--r--src/mongo/db/exec/and_hash.h2
-rw-r--r--src/mongo/db/exec/and_sorted.cpp4
-rw-r--r--src/mongo/db/exec/and_sorted.h2
-rw-r--r--src/mongo/db/exec/cached_plan.cpp8
-rw-r--r--src/mongo/db/exec/cached_plan.h4
-rw-r--r--src/mongo/db/exec/collection_scan.cpp8
-rw-r--r--src/mongo/db/exec/collection_scan.h4
-rw-r--r--src/mongo/db/exec/count.cpp4
-rw-r--r--src/mongo/db/exec/count.h2
-rw-r--r--src/mongo/db/exec/count_scan.cpp12
-rw-r--r--src/mongo/db/exec/count_scan.h4
-rw-r--r--src/mongo/db/exec/delete.cpp4
-rw-r--r--src/mongo/db/exec/delete.h2
-rw-r--r--src/mongo/db/exec/distinct_scan.cpp4
-rw-r--r--src/mongo/db/exec/distinct_scan.h2
-rw-r--r--src/mongo/db/exec/fetch.cpp8
-rw-r--r--src/mongo/db/exec/fetch.h4
-rw-r--r--src/mongo/db/exec/geo_near.cpp57
-rw-r--r--src/mongo/db/exec/geo_near.h12
-rw-r--r--src/mongo/db/exec/group.cpp4
-rw-r--r--src/mongo/db/exec/group.h2
-rw-r--r--src/mongo/db/exec/idhack.cpp12
-rw-r--r--src/mongo/db/exec/idhack.h6
-rw-r--r--src/mongo/db/exec/index_iterator.cpp4
-rw-r--r--src/mongo/db/exec/index_iterator.h2
-rw-r--r--src/mongo/db/exec/index_scan.cpp6
-rw-r--r--src/mongo/db/exec/index_scan.h4
-rw-r--r--src/mongo/db/exec/merge_sort.cpp4
-rw-r--r--src/mongo/db/exec/merge_sort.h2
-rw-r--r--src/mongo/db/exec/multi_iterator.cpp8
-rw-r--r--src/mongo/db/exec/multi_iterator.h6
-rw-r--r--src/mongo/db/exec/multi_plan.cpp20
-rw-r--r--src/mongo/db/exec/multi_plan.h6
-rw-r--r--src/mongo/db/exec/near.cpp8
-rw-r--r--src/mongo/db/exec/near.h8
-rw-r--r--src/mongo/db/exec/oplogstart.cpp8
-rw-r--r--src/mongo/db/exec/oplogstart.h4
-rw-r--r--src/mongo/db/exec/or.cpp2
-rw-r--r--src/mongo/db/exec/or.h2
-rw-r--r--src/mongo/db/exec/plan_stage.cpp6
-rw-r--r--src/mongo/db/exec/plan_stage.h4
-rw-r--r--src/mongo/db/exec/sort.cpp4
-rw-r--r--src/mongo/db/exec/sort.h2
-rw-r--r--src/mongo/db/exec/sort_key_generator.cpp8
-rw-r--r--src/mongo/db/exec/sort_key_generator.h6
-rw-r--r--src/mongo/db/exec/stagedebug_cmd.cpp63
-rw-r--r--src/mongo/db/exec/subplan.cpp4
-rw-r--r--src/mongo/db/exec/subplan.h2
-rw-r--r--src/mongo/db/exec/text.cpp14
-rw-r--r--src/mongo/db/exec/text.h4
-rw-r--r--src/mongo/db/exec/text_or.cpp14
-rw-r--r--src/mongo/db/exec/text_or.h4
-rw-r--r--src/mongo/db/exec/update.cpp13
-rw-r--r--src/mongo/db/exec/update.h4
-rw-r--r--src/mongo/db/exec/working_set_common.cpp8
-rw-r--r--src/mongo/db/exec/working_set_common.h8
-rw-r--r--src/mongo/db/exec/write_stage_common.cpp8
-rw-r--r--src/mongo/db/exec/write_stage_common.h2
-rw-r--r--src/mongo/db/ftdc/collector.cpp8
-rw-r--r--src/mongo/db/ftdc/collector.h2
-rw-r--r--src/mongo/db/ftdc/controller_test.cpp2
-rw-r--r--src/mongo/db/ftdc/ftdc_commands.cpp5
-rw-r--r--src/mongo/db/ftdc/ftdc_mongod.cpp4
-rw-r--r--src/mongo/db/ftdc/ftdc_system_stats_linux.cpp2
-rw-r--r--src/mongo/db/ftdc/ftdc_system_stats_windows.cpp2
-rw-r--r--src/mongo/db/index/haystack_access_method.cpp6
-rw-r--r--src/mongo/db/index/haystack_access_method.h2
-rw-r--r--src/mongo/db/index/haystack_access_method_internal.h10
-rw-r--r--src/mongo/db/index/index_access_method.cpp115
-rw-r--r--src/mongo/db/index/index_access_method.h36
-rw-r--r--src/mongo/db/index_builder.cpp48
-rw-r--r--src/mongo/db/index_builder.h4
-rw-r--r--src/mongo/db/index_legacy.cpp4
-rw-r--r--src/mongo/db/index_legacy.h2
-rw-r--r--src/mongo/db/index_rebuilder.cpp30
-rw-r--r--src/mongo/db/index_rebuilder.h2
-rw-r--r--src/mongo/db/introspect.cpp52
-rw-r--r--src/mongo/db/introspect.h4
-rw-r--r--src/mongo/db/matcher/expression_text.cpp10
-rw-r--r--src/mongo/db/matcher/expression_text.h2
-rw-r--r--src/mongo/db/matcher/expression_where.cpp11
-rw-r--r--src/mongo/db/matcher/expression_where.h4
-rw-r--r--src/mongo/db/matcher/extensions_callback_real.cpp8
-rw-r--r--src/mongo/db/matcher/extensions_callback_real.h11
-rw-r--r--src/mongo/db/op_observer.h31
-rw-r--r--src/mongo/db/op_observer_impl.cpp156
-rw-r--r--src/mongo/db/op_observer_impl.h30
-rw-r--r--src/mongo/db/op_observer_noop.h30
-rw-r--r--src/mongo/db/operation_context.h50
-rw-r--r--src/mongo/db/operation_context_test.cpp109
-rw-r--r--src/mongo/db/ops/delete.cpp10
-rw-r--r--src/mongo/db/ops/delete.h2
-rw-r--r--src/mongo/db/ops/parsed_delete.cpp8
-rw-r--r--src/mongo/db/ops/parsed_delete.h4
-rw-r--r--src/mongo/db/ops/parsed_update.cpp16
-rw-r--r--src/mongo/db/ops/parsed_update.h4
-rw-r--r--src/mongo/db/ops/update.cpp26
-rw-r--r--src/mongo/db/ops/update.h2
-rw-r--r--src/mongo/db/ops/update_driver.cpp5
-rw-r--r--src/mongo/db/ops/update_driver.h2
-rw-r--r--src/mongo/db/ops/update_driver_test.cpp60
-rw-r--r--src/mongo/db/ops/write_ops_exec.cpp218
-rw-r--r--src/mongo/db/ops/write_ops_exec.h6
-rw-r--r--src/mongo/db/pipeline/expression_context_for_test.h4
-rw-r--r--src/mongo/db/pipeline/pipeline_d.cpp53
-rw-r--r--src/mongo/db/pipeline/pipeline_d.h2
-rw-r--r--src/mongo/db/prefetch.cpp27
-rw-r--r--src/mongo/db/prefetch.h2
-rw-r--r--src/mongo/db/query/canonical_query.cpp10
-rw-r--r--src/mongo/db/query/canonical_query.h12
-rw-r--r--src/mongo/db/query/canonical_query_test.cpp46
-rw-r--r--src/mongo/db/query/find.cpp83
-rw-r--r--src/mongo/db/query/find.h16
-rw-r--r--src/mongo/db/query/get_executor.cpp186
-rw-r--r--src/mongo/db/query/get_executor.h16
-rw-r--r--src/mongo/db/query/get_executor_test.cpp4
-rw-r--r--src/mongo/db/query/internal_plans.cpp42
-rw-r--r--src/mongo/db/query/internal_plans.h12
-rw-r--r--src/mongo/db/query/parsed_distinct.cpp4
-rw-r--r--src/mongo/db/query/parsed_distinct.h2
-rw-r--r--src/mongo/db/query/parsed_distinct_test.cpp12
-rw-r--r--src/mongo/db/query/plan_cache_test.cpp28
-rw-r--r--src/mongo/db/query/plan_executor.cpp14
-rw-r--r--src/mongo/db/query/plan_executor.h4
-rw-r--r--src/mongo/db/query/query_planner_test.cpp10
-rw-r--r--src/mongo/db/query/query_planner_test_fixture.cpp10
-rw-r--r--src/mongo/db/query/query_planner_test_fixture.h2
-rw-r--r--src/mongo/db/query/query_yield.cpp10
-rw-r--r--src/mongo/db/query/query_yield.h2
-rw-r--r--src/mongo/db/query/stage_builder.cpp89
-rw-r--r--src/mongo/db/query/stage_builder.h2
-rw-r--r--src/mongo/db/range_deleter.cpp30
-rw-r--r--src/mongo/db/range_deleter.h10
-rw-r--r--src/mongo/db/range_deleter_db_env.cpp12
-rw-r--r--src/mongo/db/range_deleter_db_env.h4
-rw-r--r--src/mongo/db/range_deleter_mock_env.cpp4
-rw-r--r--src/mongo/db/range_deleter_mock_env.h4
-rw-r--r--src/mongo/db/range_deleter_test.cpp6
-rw-r--r--src/mongo/db/read_concern.cpp40
-rw-r--r--src/mongo/db/read_concern.h6
-rw-r--r--src/mongo/db/repair_database.cpp54
-rw-r--r--src/mongo/db/repair_database.h2
-rw-r--r--src/mongo/db/repl/bgsync.cpp79
-rw-r--r--src/mongo/db/repl/bgsync.h22
-rw-r--r--src/mongo/db/repl/collection_bulk_loader_impl.cpp50
-rw-r--r--src/mongo/db/repl/collection_bulk_loader_impl.h4
-rw-r--r--src/mongo/db/repl/collection_cloner.cpp11
-rw-r--r--src/mongo/db/repl/collection_cloner_test.cpp20
-rw-r--r--src/mongo/db/repl/data_replicator.cpp50
-rw-r--r--src/mongo/db/repl/data_replicator.h6
-rw-r--r--src/mongo/db/repl/data_replicator_external_state.h6
-rw-r--r--src/mongo/db/repl/data_replicator_external_state_impl.cpp12
-rw-r--r--src/mongo/db/repl/data_replicator_external_state_impl.h6
-rw-r--r--src/mongo/db/repl/data_replicator_external_state_mock.cpp8
-rw-r--r--src/mongo/db/repl/data_replicator_external_state_mock.h6
-rw-r--r--src/mongo/db/repl/data_replicator_test.cpp311
-rw-r--r--src/mongo/db/repl/database_task.cpp38
-rw-r--r--src/mongo/db/repl/database_task_test.cpp28
-rw-r--r--src/mongo/db/repl/databases_cloner.cpp12
-rw-r--r--src/mongo/db/repl/databases_cloner_test.cpp16
-rw-r--r--src/mongo/db/repl/initial_sync.cpp14
-rw-r--r--src/mongo/db/repl/initial_sync.h4
-rw-r--r--src/mongo/db/repl/master_slave.cpp274
-rw-r--r--src/mongo/db/repl/master_slave.h34
-rw-r--r--src/mongo/db/repl/multiapplier.cpp4
-rw-r--r--src/mongo/db/repl/multiapplier_test.cpp8
-rw-r--r--src/mongo/db/repl/noop_writer.cpp27
-rw-r--r--src/mongo/db/repl/oplog.cpp268
-rw-r--r--src/mongo/db/repl/oplog.h22
-rw-r--r--src/mongo/db/repl/oplog_buffer.h20
-rw-r--r--src/mongo/db/repl/oplog_buffer_blocking_queue.cpp4
-rw-r--r--src/mongo/db/repl/oplog_buffer_blocking_queue.h20
-rw-r--r--src/mongo/db/repl/oplog_buffer_collection.cpp56
-rw-r--r--src/mongo/db/repl/oplog_buffer_collection.h28
-rw-r--r--src/mongo/db/repl/oplog_buffer_collection_test.cpp360
-rw-r--r--src/mongo/db/repl/oplog_buffer_proxy.cpp38
-rw-r--r--src/mongo/db/repl/oplog_buffer_proxy.h20
-rw-r--r--src/mongo/db/repl/oplog_buffer_proxy_test.cpp106
-rw-r--r--src/mongo/db/repl/oplog_interface_local.cpp25
-rw-r--r--src/mongo/db/repl/oplog_interface_local.h4
-rw-r--r--src/mongo/db/repl/oplogreader.cpp2
-rw-r--r--src/mongo/db/repl/oplogreader.h2
-rw-r--r--src/mongo/db/repl/repl_client_info.cpp7
-rw-r--r--src/mongo/db/repl/repl_client_info.h2
-rw-r--r--src/mongo/db/repl/repl_set_commands.cpp62
-rw-r--r--src/mongo/db/repl/repl_set_request_votes.cpp6
-rw-r--r--src/mongo/db/repl/repl_set_web_handler.cpp6
-rw-r--r--src/mongo/db/repl/replication_coordinator.h57
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state.h51
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_impl.cpp257
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_impl.h56
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_mock.cpp37
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_mock.h44
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl.cpp205
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl.h59
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp34
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp4
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp112
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp8
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp8
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp12
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp81
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_test.cpp468
-rw-r--r--src/mongo/db/repl/replication_coordinator_mock.cpp56
-rw-r--r--src/mongo/db/repl/replication_coordinator_mock.h48
-rw-r--r--src/mongo/db/repl/replication_coordinator_test_fixture.cpp20
-rw-r--r--src/mongo/db/repl/replication_coordinator_test_fixture.h2
-rw-r--r--src/mongo/db/repl/replication_executor.cpp12
-rw-r--r--src/mongo/db/repl/replication_executor.h4
-rw-r--r--src/mongo/db/repl/replication_executor_test.cpp24
-rw-r--r--src/mongo/db/repl/replication_info.cpp36
-rw-r--r--src/mongo/db/repl/resync.cpp18
-rw-r--r--src/mongo/db/repl/rollback_source.h2
-rw-r--r--src/mongo/db/repl/rollback_source_impl.cpp4
-rw-r--r--src/mongo/db/repl/rollback_source_impl.h3
-rw-r--r--src/mongo/db/repl/rs_initialsync.cpp92
-rw-r--r--src/mongo/db/repl/rs_initialsync.h4
-rw-r--r--src/mongo/db/repl/rs_rollback.cpp120
-rw-r--r--src/mongo/db/repl/rs_rollback.h4
-rw-r--r--src/mongo/db/repl/rs_rollback_test.cpp231
-rw-r--r--src/mongo/db/repl/storage_interface.cpp4
-rw-r--r--src/mongo/db/repl/storage_interface.h42
-rw-r--r--src/mongo/db/repl/storage_interface_impl.cpp196
-rw-r--r--src/mongo/db/repl/storage_interface_impl.h45
-rw-r--r--src/mongo/db/repl/storage_interface_impl_test.cpp501
-rw-r--r--src/mongo/db/repl/storage_interface_mock.cpp20
-rw-r--r--src/mongo/db/repl/storage_interface_mock.h95
-rw-r--r--src/mongo/db/repl/sync_source_feedback.cpp31
-rw-r--r--src/mongo/db/repl/sync_tail.cpp211
-rw-r--r--src/mongo/db/repl/sync_tail.h28
-rw-r--r--src/mongo/db/repl/sync_tail_test.cpp273
-rw-r--r--src/mongo/db/repl/task_runner.cpp22
-rw-r--r--src/mongo/db/repl/task_runner.h2
-rw-r--r--src/mongo/db/repl/task_runner_test.cpp22
-rw-r--r--src/mongo/db/restapi.cpp26
-rw-r--r--src/mongo/db/restapi.h2
-rw-r--r--src/mongo/db/run_commands.cpp10
-rw-r--r--src/mongo/db/run_commands.h2
-rw-r--r--src/mongo/db/s/active_migrations_registry.cpp10
-rw-r--r--src/mongo/db/s/active_migrations_registry.h4
-rw-r--r--src/mongo/db/s/balancer/balancer.cpp127
-rw-r--r--src/mongo/db/s/balancer/balancer.h24
-rw-r--r--src/mongo/db/s/balancer/balancer_chunk_selection_policy.h8
-rw-r--r--src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp54
-rw-r--r--src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h14
-rw-r--r--src/mongo/db/s/balancer/cluster_statistics.h2
-rw-r--r--src/mongo/db/s/balancer/cluster_statistics_impl.cpp18
-rw-r--r--src/mongo/db/s/balancer/cluster_statistics_impl.h2
-rw-r--r--src/mongo/db/s/balancer/migration_manager.cpp88
-rw-r--r--src/mongo/db/s/balancer/migration_manager.h16
-rw-r--r--src/mongo/db/s/balancer/migration_manager_test.cpp66
-rw-r--r--src/mongo/db/s/balancer/scoped_migration_request.cpp48
-rw-r--r--src/mongo/db/s/balancer/scoped_migration_request.h8
-rw-r--r--src/mongo/db/s/check_sharding_index_command.cpp12
-rw-r--r--src/mongo/db/s/chunk_move_write_concern_options.cpp4
-rw-r--r--src/mongo/db/s/chunk_move_write_concern_options.h2
-rw-r--r--src/mongo/db/s/cleanup_orphaned_cmd.cpp18
-rw-r--r--src/mongo/db/s/collection_metadata_test.cpp20
-rw-r--r--src/mongo/db/s/collection_range_deleter.cpp33
-rw-r--r--src/mongo/db/s/collection_range_deleter.h4
-rw-r--r--src/mongo/db/s/collection_sharding_state.cpp117
-rw-r--r--src/mongo/db/s/collection_sharding_state.h26
-rw-r--r--src/mongo/db/s/collection_sharding_state_test.cpp23
-rw-r--r--src/mongo/db/s/config/configsvr_add_shard_command.cpp8
-rw-r--r--src/mongo/db/s/config/configsvr_add_shard_to_zone_command.cpp6
-rw-r--r--src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp6
-rw-r--r--src/mongo/db/s/config/configsvr_control_balancer_command.cpp24
-rw-r--r--src/mongo/db/s/config/configsvr_merge_chunk_command.cpp12
-rw-r--r--src/mongo/db/s/config/configsvr_move_chunk_command.cpp16
-rw-r--r--src/mongo/db/s/config/configsvr_remove_shard_from_zone_command.cpp6
-rw-r--r--src/mongo/db/s/config/configsvr_set_feature_compatibility_version_command.cpp8
-rw-r--r--src/mongo/db/s/config/configsvr_split_chunk_command.cpp14
-rw-r--r--src/mongo/db/s/config/configsvr_update_zone_key_range_command.cpp10
-rw-r--r--src/mongo/db/s/get_shard_version_command.cpp12
-rw-r--r--src/mongo/db/s/merge_chunks_command.cpp34
-rw-r--r--src/mongo/db/s/metadata_loader.cpp30
-rw-r--r--src/mongo/db/s/metadata_loader.h10
-rw-r--r--src/mongo/db/s/metadata_loader_test.cpp20
-rw-r--r--src/mongo/db/s/metadata_manager_test.cpp6
-rw-r--r--src/mongo/db/s/migration_chunk_cloner_source.h16
-rw-r--r--src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp98
-rw-r--r--src/mongo/db/s/migration_chunk_cloner_source_legacy.h26
-rw-r--r--src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp22
-rw-r--r--src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp2
-rw-r--r--src/mongo/db/s/migration_destination_manager.cpp124
-rw-r--r--src/mongo/db/s/migration_destination_manager.h10
-rw-r--r--src/mongo/db/s/migration_destination_manager_legacy_commands.cpp20
-rw-r--r--src/mongo/db/s/migration_source_manager.cpp166
-rw-r--r--src/mongo/db/s/migration_source_manager.h16
-rw-r--r--src/mongo/db/s/move_chunk_command.cpp38
-rw-r--r--src/mongo/db/s/move_timing_helper.cpp18
-rw-r--r--src/mongo/db/s/move_timing_helper.h4
-rw-r--r--src/mongo/db/s/operation_sharding_state.cpp22
-rw-r--r--src/mongo/db/s/operation_sharding_state.h10
-rw-r--r--src/mongo/db/s/set_shard_version_command.cpp30
-rw-r--r--src/mongo/db/s/shard_identity_rollback_notifier.cpp8
-rw-r--r--src/mongo/db/s/shard_identity_rollback_notifier.h4
-rw-r--r--src/mongo/db/s/sharding_initialization_mongod.cpp4
-rw-r--r--src/mongo/db/s/sharding_initialization_mongod.h2
-rw-r--r--src/mongo/db/s/sharding_server_status.cpp11
-rw-r--r--src/mongo/db/s/sharding_state.cpp102
-rw-r--r--src/mongo/db/s/sharding_state.h26
-rw-r--r--src/mongo/db/s/sharding_state_command.cpp4
-rw-r--r--src/mongo/db/s/sharding_state_recovery.cpp43
-rw-r--r--src/mongo/db/s/sharding_state_recovery.h6
-rw-r--r--src/mongo/db/s/sharding_state_test.cpp10
-rw-r--r--src/mongo/db/s/split_chunk_command.cpp42
-rw-r--r--src/mongo/db/s/split_vector_command.cpp14
-rw-r--r--src/mongo/db/s/unset_sharding_command.cpp4
-rw-r--r--src/mongo/db/server_parameters.h4
-rw-r--r--src/mongo/db/server_parameters_inline.h34
-rw-r--r--src/mongo/db/server_parameters_test.cpp4
-rw-r--r--src/mongo/db/service_context.cpp4
-rw-r--r--src/mongo/db/service_context.h12
-rw-r--r--src/mongo/db/service_context_d_test_fixture.cpp20
-rw-r--r--src/mongo/db/service_context_d_test_fixture.h2
-rw-r--r--src/mongo/db/stats/latency_server_status_section.cpp4
-rw-r--r--src/mongo/db/stats/lock_server_status_section.cpp8
-rw-r--r--src/mongo/db/stats/range_deleter_server_status.cpp2
-rw-r--r--src/mongo/db/stats/snapshots_webplugins.cpp2
-rw-r--r--src/mongo/db/stats/storage_stats.cpp22
-rw-r--r--src/mongo/db/stats/storage_stats.h2
-rw-r--r--src/mongo/db/stats/top.cpp16
-rw-r--r--src/mongo/db/stats/top.h8
-rw-r--r--src/mongo/db/storage/bson_collection_catalog_entry.cpp32
-rw-r--r--src/mongo/db/storage/bson_collection_catalog_entry.h18
-rw-r--r--src/mongo/db/storage/capped_callback.h2
-rw-r--r--src/mongo/db/storage/devnull/devnull_kv_engine.cpp58
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp46
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h4
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp85
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h44
-rw-r--r--src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp70
-rw-r--r--src/mongo/db/storage/kv/kv_collection_catalog_entry.h18
-rw-r--r--src/mongo/db/storage/kv/kv_database_catalog_entry.cpp6
-rw-r--r--src/mongo/db/storage/kv/kv_database_catalog_entry.h2
-rw-r--r--src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp36
-rw-r--r--src/mongo/db/storage/kv/kv_database_catalog_entry_base.h6
-rw-r--r--src/mongo/db/storage/kv/kv_database_catalog_entry_mock.cpp2
-rw-r--r--src/mongo/db/storage/kv/kv_database_catalog_entry_mock.h2
-rw-r--r--src/mongo/db/storage/kv/kv_engine.h6
-rw-r--r--src/mongo/db/storage/kv/kv_engine_test_snapshots.cpp27
-rw-r--r--src/mongo/db/storage/kv/kv_storage_engine.cpp28
-rw-r--r--src/mongo/db/storage/kv/kv_storage_engine.h12
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp95
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp460
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_logic.h118
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp496
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_test_help.cpp26
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_test_help.h10
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/hashtab.h8
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp38
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_details.h21
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp165
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h42
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp61
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h32
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp39
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_index.h18
-rw-r--r--src/mongo/db/storage/mmap_v1/data_file.cpp31
-rw-r--r--src/mongo/db/storage/mmap_v1/data_file.h18
-rw-r--r--src/mongo/db/storage/mmap_v1/data_file_sync.cpp6
-rw-r--r--src/mongo/db/storage/mmap_v1/data_file_sync.h3
-rw-r--r--src/mongo/db/storage/mmap_v1/dur.cpp51
-rw-r--r--src/mongo/db/storage/mmap_v1/dur.h6
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_journal_writer.cpp4
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_recover.cpp75
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_recover.h22
-rw-r--r--src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp20
-rw-r--r--src/mongo/db/storage/mmap_v1/durable_mapped_file.h10
-rw-r--r--src/mongo/db/storage/mmap_v1/extent_manager.h16
-rw-r--r--src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp16
-rw-r--r--src/mongo/db/storage/mmap_v1/heap_record_store_btree.h38
-rw-r--r--src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap.cpp48
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap.h54
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_posix.cpp16
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp184
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h34
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp30
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_engine.h14
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp130
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h32
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp18
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_windows.cpp32
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp249
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_base.h112
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp221
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_capped.h38
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.cpp22
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.h12
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp302
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.cpp16
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h12
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp142
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_simple.h20
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.cpp24
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.h12
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp146
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp90
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h55
-rw-r--r--src/mongo/db/storage/mmap_v1/repair_database.cpp64
-rw-r--r--src/mongo/db/storage/record_fetcher.h2
-rw-r--r--src/mongo/db/storage/record_store.h70
-rw-r--r--src/mongo/db/storage/record_store_test_capped_visibility.cpp58
-rw-r--r--src/mongo/db/storage/record_store_test_updaterecord.h10
-rw-r--r--src/mongo/db/storage/snapshot_manager.h4
-rw-r--r--src/mongo/db/storage/sorted_data_interface.h43
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_harness.cpp12
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_harness.h4
-rw-r--r--src/mongo/db/storage/storage_engine.h12
-rw-r--r--src/mongo/db/storage/storage_init.cpp5
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp122
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_index.h30
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp8
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp8
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h6
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_parameters.h2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp326
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h70
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp16
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h12
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp28
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp8
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h8
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_server_status.cpp4
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_server_status.h3
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp9
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp8
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_util.h2
-rw-r--r--src/mongo/db/ttl.cpp26
-rw-r--r--src/mongo/db/views/durable_view_catalog.cpp42
-rw-r--r--src/mongo/db/views/durable_view_catalog.h14
-rw-r--r--src/mongo/db/views/view_catalog.cpp87
-rw-r--r--src/mongo/db/views/view_catalog.h28
-rw-r--r--src/mongo/db/views/view_catalog_test.cpp6
-rw-r--r--src/mongo/db/write_concern.cpp30
-rw-r--r--src/mongo/db/write_concern.h6
-rw-r--r--src/mongo/dbtests/clienttests.cpp120
-rw-r--r--src/mongo/dbtests/commandtests.cpp10
-rw-r--r--src/mongo/dbtests/counttests.cpp28
-rw-r--r--src/mongo/dbtests/dbhelper_tests.cpp20
-rw-r--r--src/mongo/dbtests/dbtests.cpp16
-rw-r--r--src/mongo/dbtests/dbtests.h7
-rw-r--r--src/mongo/dbtests/directclienttests.cpp48
-rw-r--r--src/mongo/dbtests/extensions_callback_real_test.cpp84
-rw-r--r--src/mongo/dbtests/framework.cpp4
-rw-r--r--src/mongo/dbtests/gle_test.cpp18
-rw-r--r--src/mongo/dbtests/indexcatalogtests.cpp98
-rw-r--r--src/mongo/dbtests/indexupdatetests.cpp102
-rw-r--r--src/mongo/dbtests/jstests.cpp36
-rw-r--r--src/mongo/dbtests/matchertests.cpp8
-rw-r--r--src/mongo/dbtests/mmaptests.cpp29
-rw-r--r--src/mongo/dbtests/namespacetests.cpp124
-rw-r--r--src/mongo/dbtests/oplogstarttests.cpp20
-rw-r--r--src/mongo/dbtests/pdfiletests.cpp32
-rw-r--r--src/mongo/dbtests/perftests.cpp18
-rw-r--r--src/mongo/dbtests/plan_ranking.cpp50
-rw-r--r--src/mongo/dbtests/query_plan_executor.cpp75
-rw-r--r--src/mongo/dbtests/query_stage_and.cpp289
-rw-r--r--src/mongo/dbtests/query_stage_cached_plan.cpp48
-rw-r--r--src/mongo/dbtests/query_stage_collscan.cpp56
-rw-r--r--src/mongo/dbtests/query_stage_count.cpp56
-rw-r--r--src/mongo/dbtests/query_stage_count_scan.cpp62
-rw-r--r--src/mongo/dbtests/query_stage_delete.cpp36
-rw-r--r--src/mongo/dbtests/query_stage_distinct.cpp26
-rw-r--r--src/mongo/dbtests/query_stage_ensure_sorted.cpp16
-rw-r--r--src/mongo/dbtests/query_stage_fetch.cpp32
-rw-r--r--src/mongo/dbtests/query_stage_ixscan.cpp28
-rw-r--r--src/mongo/dbtests/query_stage_keep.cpp25
-rw-r--r--src/mongo/dbtests/query_stage_merge_sort.cpp164
-rw-r--r--src/mongo/dbtests/query_stage_multiplan.cpp70
-rw-r--r--src/mongo/dbtests/query_stage_near.cpp6
-rw-r--r--src/mongo/dbtests/query_stage_sort.cpp94
-rw-r--r--src/mongo/dbtests/query_stage_subplan.cpp70
-rw-r--r--src/mongo/dbtests/query_stage_tests.cpp22
-rw-r--r--src/mongo/dbtests/query_stage_update.cpp64
-rw-r--r--src/mongo/dbtests/querytests.cpp173
-rw-r--r--src/mongo/dbtests/replica_set_tests.cpp36
-rw-r--r--src/mongo/dbtests/repltests.cpp112
-rw-r--r--src/mongo/dbtests/rollbacktests.cpp432
-rw-r--r--src/mongo/dbtests/sort_key_generator_test.cpp8
-rw-r--r--src/mongo/dbtests/updatetests.cpp10
-rw-r--r--src/mongo/dbtests/validate_tests.cpp272
-rw-r--r--src/mongo/executor/network_interface_asio.cpp2
-rw-r--r--src/mongo/executor/network_interface_asio_test.cpp2
-rw-r--r--src/mongo/executor/remote_command_request.cpp8
-rw-r--r--src/mongo/executor/remote_command_request.h14
-rw-r--r--src/mongo/executor/task_executor.cpp2
-rw-r--r--src/mongo/executor/task_executor.h4
-rw-r--r--src/mongo/rpc/metadata.cpp16
-rw-r--r--src/mongo/rpc/metadata.h4
-rw-r--r--src/mongo/rpc/metadata/client_metadata_ismaster.cpp14
-rw-r--r--src/mongo/rpc/metadata/client_metadata_ismaster.h4
-rw-r--r--src/mongo/rpc/metadata/egress_metadata_hook_list.cpp4
-rw-r--r--src/mongo/rpc/metadata/egress_metadata_hook_list.h2
-rw-r--r--src/mongo/rpc/metadata/egress_metadata_hook_list_test.cpp4
-rw-r--r--src/mongo/rpc/metadata/metadata_hook.h5
-rw-r--r--src/mongo/rpc/protocol.h4
-rw-r--r--src/mongo/s/async_requests_sender.cpp31
-rw-r--r--src/mongo/s/async_requests_sender.h15
-rw-r--r--src/mongo/s/balancer_configuration.cpp30
-rw-r--r--src/mongo/s/balancer_configuration.h10
-rw-r--r--src/mongo/s/catalog/dist_lock_catalog.h23
-rw-r--r--src/mongo/s/catalog/dist_lock_catalog_impl.cpp53
-rw-r--r--src/mongo/s/catalog/dist_lock_catalog_impl.h27
-rw-r--r--src/mongo/s/catalog/dist_lock_catalog_mock.cpp23
-rw-r--r--src/mongo/s/catalog/dist_lock_catalog_mock.h23
-rw-r--r--src/mongo/s/catalog/dist_lock_manager.cpp20
-rw-r--r--src/mongo/s/catalog/dist_lock_manager.h20
-rw-r--r--src/mongo/s/catalog/dist_lock_manager_mock.cpp14
-rw-r--r--src/mongo/s/catalog/dist_lock_manager_mock.h16
-rw-r--r--src/mongo/s/catalog/replset_dist_lock_manager.cpp52
-rw-r--r--src/mongo/s/catalog/replset_dist_lock_manager.h16
-rw-r--r--src/mongo/s/catalog/sharding_catalog_client.h54
-rw-r--r--src/mongo/s/catalog/sharding_catalog_client_impl.cpp287
-rw-r--r--src/mongo/s/catalog/sharding_catalog_client_impl.h74
-rw-r--r--src/mongo/s/catalog/sharding_catalog_client_mock.cpp59
-rw-r--r--src/mongo/s/catalog/sharding_catalog_client_mock.h54
-rw-r--r--src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp16
-rw-r--r--src/mongo/s/catalog/sharding_catalog_manager.h24
-rw-r--r--src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp66
-rw-r--r--src/mongo/s/catalog/sharding_catalog_manager_impl.cpp50
-rw-r--r--src/mongo/s/catalog/sharding_catalog_manager_impl.h36
-rw-r--r--src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp75
-rw-r--r--src/mongo/s/catalog/sharding_catalog_manager_zone_operations_impl.cpp60
-rw-r--r--src/mongo/s/catalog_cache.cpp6
-rw-r--r--src/mongo/s/catalog_cache.h2
-rw-r--r--src/mongo/s/chunk_diff.cpp6
-rw-r--r--src/mongo/s/chunk_diff.h6
-rw-r--r--src/mongo/s/chunk_diff_test.cpp6
-rw-r--r--src/mongo/s/chunk_manager.cpp27
-rw-r--r--src/mongo/s/chunk_manager.h6
-rw-r--r--src/mongo/s/client/parallel.cpp52
-rw-r--r--src/mongo/s/client/parallel.h14
-rw-r--r--src/mongo/s/client/shard.cpp28
-rw-r--r--src/mongo/s/client/shard.h18
-rw-r--r--src/mongo/s/client/shard_connection.cpp18
-rw-r--r--src/mongo/s/client/shard_connection.h2
-rw-r--r--src/mongo/s/client/shard_local.cpp30
-rw-r--r--src/mongo/s/client/shard_local.h8
-rw-r--r--src/mongo/s/client/shard_local_test.cpp20
-rw-r--r--src/mongo/s/client/shard_registry.cpp44
-rw-r--r--src/mongo/s/client/shard_registry.h8
-rw-r--r--src/mongo/s/client/shard_remote.cpp38
-rw-r--r--src/mongo/s/client/shard_remote.h9
-rw-r--r--src/mongo/s/client/sharding_connection_hook.cpp4
-rw-r--r--src/mongo/s/client/version_manager.cpp34
-rw-r--r--src/mongo/s/cluster_identity_loader.cpp10
-rw-r--r--src/mongo/s/cluster_identity_loader.h4
-rw-r--r--src/mongo/s/commands/chunk_manager_targeter.cpp57
-rw-r--r--src/mongo/s/commands/chunk_manager_targeter.h18
-rw-r--r--src/mongo/s/commands/cluster_add_shard_cmd.cpp10
-rw-r--r--src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp6
-rw-r--r--src/mongo/s/commands/cluster_aggregate.cpp46
-rw-r--r--src/mongo/s/commands/cluster_aggregate.h6
-rw-r--r--src/mongo/s/commands/cluster_apply_ops_cmd.cpp6
-rw-r--r--src/mongo/s/commands/cluster_available_query_options_cmd.cpp2
-rw-r--r--src/mongo/s/commands/cluster_commands_common.cpp18
-rw-r--r--src/mongo/s/commands/cluster_commands_common.h4
-rw-r--r--src/mongo/s/commands/cluster_control_balancer_cmd.cpp6
-rw-r--r--src/mongo/s/commands/cluster_count_cmd.cpp14
-rw-r--r--src/mongo/s/commands/cluster_drop_cmd.cpp22
-rw-r--r--src/mongo/s/commands/cluster_drop_database_cmd.cpp41
-rw-r--r--src/mongo/s/commands/cluster_enable_sharding_cmd.cpp6
-rw-r--r--src/mongo/s/commands/cluster_explain.cpp10
-rw-r--r--src/mongo/s/commands/cluster_explain.h4
-rw-r--r--src/mongo/s/commands/cluster_explain_cmd.cpp8
-rw-r--r--src/mongo/s/commands/cluster_find_and_modify_cmd.cpp46
-rw-r--r--src/mongo/s/commands/cluster_find_cmd.cpp16
-rw-r--r--src/mongo/s/commands/cluster_flush_router_config_cmd.cpp4
-rw-r--r--src/mongo/s/commands/cluster_fsync_cmd.cpp6
-rw-r--r--src/mongo/s/commands/cluster_ftdc_commands.cpp2
-rw-r--r--src/mongo/s/commands/cluster_get_last_error_cmd.cpp17
-rw-r--r--src/mongo/s/commands/cluster_get_prev_error_cmd.cpp2
-rw-r--r--src/mongo/s/commands/cluster_get_shard_map_cmd.cpp2
-rw-r--r--src/mongo/s/commands/cluster_get_shard_version_cmd.cpp6
-rw-r--r--src/mongo/s/commands/cluster_getmore_cmd.cpp4
-rw-r--r--src/mongo/s/commands/cluster_index_filter_cmd.cpp4
-rw-r--r--src/mongo/s/commands/cluster_is_db_grid_cmd.cpp2
-rw-r--r--src/mongo/s/commands/cluster_is_master_cmd.cpp12
-rw-r--r--src/mongo/s/commands/cluster_kill_op.cpp4
-rw-r--r--src/mongo/s/commands/cluster_killcursors_cmd.cpp4
-rw-r--r--src/mongo/s/commands/cluster_list_databases_cmd.cpp10
-rw-r--r--src/mongo/s/commands/cluster_list_shards_cmd.cpp6
-rw-r--r--src/mongo/s/commands/cluster_map_reduce_cmd.cpp62
-rw-r--r--src/mongo/s/commands/cluster_merge_chunks_cmd.cpp8
-rw-r--r--src/mongo/s/commands/cluster_move_chunk_cmd.cpp15
-rw-r--r--src/mongo/s/commands/cluster_move_primary_cmd.cpp32
-rw-r--r--src/mongo/s/commands/cluster_netstat_cmd.cpp2
-rw-r--r--src/mongo/s/commands/cluster_pipeline_cmd.cpp4
-rw-r--r--src/mongo/s/commands/cluster_plan_cache_cmd.cpp6
-rw-r--r--src/mongo/s/commands/cluster_profile_cmd.cpp2
-rw-r--r--src/mongo/s/commands/cluster_remove_shard_cmd.cpp12
-rw-r--r--src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp6
-rw-r--r--src/mongo/s/commands/cluster_repl_set_get_status_cmd.cpp2
-rw-r--r--src/mongo/s/commands/cluster_reset_error_cmd.cpp2
-rw-r--r--src/mongo/s/commands/cluster_set_feature_compatibility_version_cmd.cpp6
-rw-r--r--src/mongo/s/commands/cluster_shard_collection_cmd.cpp36
-rw-r--r--src/mongo/s/commands/cluster_shutdown_cmd.cpp2
-rw-r--r--src/mongo/s/commands/cluster_split_cmd.cpp18
-rw-r--r--src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp6
-rw-r--r--src/mongo/s/commands/cluster_user_management_commands.cpp122
-rw-r--r--src/mongo/s/commands/cluster_whats_my_uri_cmd.cpp2
-rw-r--r--src/mongo/s/commands/cluster_write.cpp53
-rw-r--r--src/mongo/s/commands/cluster_write.h4
-rw-r--r--src/mongo/s/commands/cluster_write_cmd.cpp35
-rw-r--r--src/mongo/s/commands/commands_public.cpp194
-rw-r--r--src/mongo/s/commands/run_on_all_shards_cmd.cpp12
-rw-r--r--src/mongo/s/commands/run_on_all_shards_cmd.h4
-rw-r--r--src/mongo/s/commands/strategy.cpp82
-rw-r--r--src/mongo/s/commands/strategy.h16
-rw-r--r--src/mongo/s/config.cpp36
-rw-r--r--src/mongo/s/config.h8
-rw-r--r--src/mongo/s/config_server_client.cpp12
-rw-r--r--src/mongo/s/config_server_client.h4
-rw-r--r--src/mongo/s/config_server_test_fixture.cpp20
-rw-r--r--src/mongo/s/config_server_test_fixture.h10
-rw-r--r--src/mongo/s/grid.h2
-rw-r--r--src/mongo/s/local_sharding_info.cpp6
-rw-r--r--src/mongo/s/local_sharding_info.h2
-rw-r--r--src/mongo/s/ns_targeter.h8
-rw-r--r--src/mongo/s/query/async_results_merger.cpp24
-rw-r--r--src/mongo/s/query/async_results_merger.h10
-rw-r--r--src/mongo/s/query/cluster_client_cursor.h4
-rw-r--r--src/mongo/s/query/cluster_client_cursor_impl.cpp18
-rw-r--r--src/mongo/s/query/cluster_client_cursor_impl.h10
-rw-r--r--src/mongo/s/query/cluster_client_cursor_mock.cpp4
-rw-r--r--src/mongo/s/query/cluster_client_cursor_mock.h4
-rw-r--r--src/mongo/s/query/cluster_cursor_manager.cpp10
-rw-r--r--src/mongo/s/query/cluster_cursor_manager.h6
-rw-r--r--src/mongo/s/query/cluster_find.cpp41
-rw-r--r--src/mongo/s/query/cluster_find.h4
-rw-r--r--src/mongo/s/query/router_exec_stage.h4
-rw-r--r--src/mongo/s/query/router_stage_limit.cpp8
-rw-r--r--src/mongo/s/query/router_stage_limit.h4
-rw-r--r--src/mongo/s/query/router_stage_merge.cpp8
-rw-r--r--src/mongo/s/query/router_stage_merge.h4
-rw-r--r--src/mongo/s/query/router_stage_mock.cpp4
-rw-r--r--src/mongo/s/query/router_stage_mock.h4
-rw-r--r--src/mongo/s/query/router_stage_remove_sortkey.cpp8
-rw-r--r--src/mongo/s/query/router_stage_remove_sortkey.h4
-rw-r--r--src/mongo/s/query/router_stage_skip.cpp10
-rw-r--r--src/mongo/s/query/router_stage_skip.h4
-rw-r--r--src/mongo/s/query/store_possible_cursor.cpp6
-rw-r--r--src/mongo/s/query/store_possible_cursor.h2
-rw-r--r--src/mongo/s/s_sharding_server_status.cpp6
-rw-r--r--src/mongo/s/server.cpp22
-rw-r--r--src/mongo/s/service_entry_point_mongos.cpp20
-rw-r--r--src/mongo/s/shard_key_pattern.cpp5
-rw-r--r--src/mongo/s/shard_key_pattern.h2
-rw-r--r--src/mongo/s/shard_key_pattern_test.cpp4
-rw-r--r--src/mongo/s/shard_util.cpp18
-rw-r--r--src/mongo/s/shard_util.h6
-rw-r--r--src/mongo/s/sharding_egress_metadata_hook.cpp10
-rw-r--r--src/mongo/s/sharding_egress_metadata_hook.h4
-rw-r--r--src/mongo/s/sharding_initialization.cpp16
-rw-r--r--src/mongo/s/sharding_initialization.h6
-rw-r--r--src/mongo/s/sharding_raii.cpp32
-rw-r--r--src/mongo/s/sharding_raii.h10
-rw-r--r--src/mongo/s/sharding_uptime_reporter.cpp17
-rw-r--r--src/mongo/s/write_ops/batch_write_exec.cpp10
-rw-r--r--src/mongo/s/write_ops/batch_write_exec.h2
-rw-r--r--src/mongo/s/write_ops/batch_write_op.cpp4
-rw-r--r--src/mongo/s/write_ops/batch_write_op.h2
-rw-r--r--src/mongo/s/write_ops/batch_write_op_test.cpp150
-rw-r--r--src/mongo/s/write_ops/mock_ns_targeter.h10
-rw-r--r--src/mongo/s/write_ops/write_op.cpp8
-rw-r--r--src/mongo/s/write_ops/write_op.h2
-rw-r--r--src/mongo/s/write_ops/write_op_test.cpp24
-rw-r--r--src/mongo/scripting/dbdirectclient_factory.cpp10
-rw-r--r--src/mongo/scripting/dbdirectclient_factory.h4
-rw-r--r--src/mongo/scripting/engine.cpp30
-rw-r--r--src/mongo/scripting/engine.h10
-rw-r--r--src/mongo/scripting/mozjs/engine.cpp6
-rw-r--r--src/mongo/scripting/mozjs/implscope.cpp14
-rw-r--r--src/mongo/scripting/mozjs/implscope.h4
-rw-r--r--src/mongo/scripting/mozjs/mongo.cpp4
-rw-r--r--src/mongo/scripting/mozjs/proxyscope.cpp8
-rw-r--r--src/mongo/scripting/mozjs/proxyscope.h4
-rw-r--r--src/mongo/util/admin_access.h4
-rw-r--r--src/mongo/util/concurrency/notification.h14
-rw-r--r--src/mongo/util/fail_point_server_parameter.cpp2
-rw-r--r--src/mongo/util/fail_point_server_parameter.h2
-rw-r--r--src/mongo/util/heap_profiler.cpp2
-rw-r--r--src/mongo/util/progress_meter.h2
-rw-r--r--src/mongo/util/tcmalloc_server_status_section.cpp3
-rw-r--r--src/mongo/util/tcmalloc_set_parameter.cpp4
873 files changed, 14500 insertions, 14108 deletions
diff --git a/src/mongo/client/examples/mongoperf.cpp b/src/mongo/client/examples/mongoperf.cpp
index 04f83044955..80f43ae815d 100644
--- a/src/mongo/client/examples/mongoperf.cpp
+++ b/src/mongo/client/examples/mongoperf.cpp
@@ -173,11 +173,11 @@ void go() {
recSizeKB = 4;
verify(recSizeKB <= 64000 && recSizeKB > 0);
- auto txn = cc().makeOperationContext();
- MemoryMappedFile f(txn.get());
- ON_BLOCK_EXIT([&f, &txn] {
- LockMongoFilesExclusive lock(txn.get());
- f.close(txn.get());
+ auto opCtx = cc().makeOperationContext();
+ MemoryMappedFile f(opCtx.get());
+ ON_BLOCK_EXIT([&f, &opCtx] {
+ LockMongoFilesExclusive lock(opCtx.get());
+ f.close(opCtx.get());
});
cout << "creating test file size:";
@@ -216,8 +216,8 @@ void go() {
if (o["mmf"].trueValue()) {
delete lf;
lf = 0;
- mmfFile = new MemoryMappedFile(txn.get());
- mmf = (char*)mmfFile->map(txn.get(), fname);
+ mmfFile = new MemoryMappedFile(opCtx.get());
+ mmf = (char*)mmfFile->map(opCtx.get(), fname);
verify(mmf);
syncDelaySecs = options["syncDelay"].numberInt();
diff --git a/src/mongo/client/remote_command_targeter.h b/src/mongo/client/remote_command_targeter.h
index 98c6fa0c72c..55e47db6f8f 100644
--- a/src/mongo/client/remote_command_targeter.h
+++ b/src/mongo/client/remote_command_targeter.h
@@ -65,7 +65,7 @@ public:
* TODO(schwerin): Once operation max-time behavior is more uniformly integrated into sharding,
* remove the 20-second ceiling on wait time.
*/
- virtual StatusWith<HostAndPort> findHost(OperationContext* txn,
+ virtual StatusWith<HostAndPort> findHost(OperationContext* opCtx,
const ReadPreferenceSetting& readPref) = 0;
diff --git a/src/mongo/client/remote_command_targeter_factory_mock.cpp b/src/mongo/client/remote_command_targeter_factory_mock.cpp
index 41cf0949f61..fa62e92679a 100644
--- a/src/mongo/client/remote_command_targeter_factory_mock.cpp
+++ b/src/mongo/client/remote_command_targeter_factory_mock.cpp
@@ -47,9 +47,9 @@ public:
return _mock->connectionString();
}
- StatusWith<HostAndPort> findHost(OperationContext* txn,
+ StatusWith<HostAndPort> findHost(OperationContext* opCtx,
const ReadPreferenceSetting& readPref) override {
- return _mock->findHost(txn, readPref);
+ return _mock->findHost(opCtx, readPref);
}
StatusWith<HostAndPort> findHostWithMaxWait(const ReadPreferenceSetting& readPref,
diff --git a/src/mongo/client/remote_command_targeter_mock.cpp b/src/mongo/client/remote_command_targeter_mock.cpp
index b216a717a29..49e98c44603 100644
--- a/src/mongo/client/remote_command_targeter_mock.cpp
+++ b/src/mongo/client/remote_command_targeter_mock.cpp
@@ -52,7 +52,7 @@ ConnectionString RemoteCommandTargeterMock::connectionString() {
return _connectionStringReturnValue;
}
-StatusWith<HostAndPort> RemoteCommandTargeterMock::findHost(OperationContext* txn,
+StatusWith<HostAndPort> RemoteCommandTargeterMock::findHost(OperationContext* opCtx,
const ReadPreferenceSetting& readPref) {
return _findHostReturnValue;
}
diff --git a/src/mongo/client/remote_command_targeter_mock.h b/src/mongo/client/remote_command_targeter_mock.h
index 23dbb92a6ba..06ba67f1b0e 100644
--- a/src/mongo/client/remote_command_targeter_mock.h
+++ b/src/mongo/client/remote_command_targeter_mock.h
@@ -57,7 +57,7 @@ public:
StatusWith<HostAndPort> findHostWithMaxWait(const ReadPreferenceSetting& readPref,
Milliseconds maxWait) override;
- StatusWith<HostAndPort> findHost(OperationContext* txn,
+ StatusWith<HostAndPort> findHost(OperationContext* opCtx,
const ReadPreferenceSetting& readPref) override;
/**
diff --git a/src/mongo/client/remote_command_targeter_rs.cpp b/src/mongo/client/remote_command_targeter_rs.cpp
index 59ae0d9cdbf..0bf8bedb837 100644
--- a/src/mongo/client/remote_command_targeter_rs.cpp
+++ b/src/mongo/client/remote_command_targeter_rs.cpp
@@ -65,12 +65,12 @@ StatusWith<HostAndPort> RemoteCommandTargeterRS::findHostWithMaxWait(
return _rsMonitor->getHostOrRefresh(readPref, maxWait);
}
-StatusWith<HostAndPort> RemoteCommandTargeterRS::findHost(OperationContext* txn,
+StatusWith<HostAndPort> RemoteCommandTargeterRS::findHost(OperationContext* opCtx,
const ReadPreferenceSetting& readPref) {
- auto clock = txn->getServiceContext()->getFastClockSource();
+ auto clock = opCtx->getServiceContext()->getFastClockSource();
auto startDate = clock->now();
while (true) {
- const auto interruptStatus = txn->checkForInterruptNoAssert();
+ const auto interruptStatus = opCtx->checkForInterruptNoAssert();
if (!interruptStatus.isOK()) {
return interruptStatus;
}
diff --git a/src/mongo/client/remote_command_targeter_rs.h b/src/mongo/client/remote_command_targeter_rs.h
index 7a8851c01ee..c64ef8431e1 100644
--- a/src/mongo/client/remote_command_targeter_rs.h
+++ b/src/mongo/client/remote_command_targeter_rs.h
@@ -52,7 +52,7 @@ public:
ConnectionString connectionString() override;
- StatusWith<HostAndPort> findHost(OperationContext* txn,
+ StatusWith<HostAndPort> findHost(OperationContext* opCtx,
const ReadPreferenceSetting& readPref) override;
StatusWith<HostAndPort> findHostWithMaxWait(const ReadPreferenceSetting& readPref,
diff --git a/src/mongo/client/remote_command_targeter_standalone.cpp b/src/mongo/client/remote_command_targeter_standalone.cpp
index b44cb1b4c44..ad93df1f05e 100644
--- a/src/mongo/client/remote_command_targeter_standalone.cpp
+++ b/src/mongo/client/remote_command_targeter_standalone.cpp
@@ -48,7 +48,7 @@ StatusWith<HostAndPort> RemoteCommandTargeterStandalone::findHostWithMaxWait(
}
StatusWith<HostAndPort> RemoteCommandTargeterStandalone::findHost(
- OperationContext* txn, const ReadPreferenceSetting& readPref) {
+ OperationContext* opCtx, const ReadPreferenceSetting& readPref) {
return _hostAndPort;
}
diff --git a/src/mongo/client/remote_command_targeter_standalone.h b/src/mongo/client/remote_command_targeter_standalone.h
index dd9e97a9df2..3667d979183 100644
--- a/src/mongo/client/remote_command_targeter_standalone.h
+++ b/src/mongo/client/remote_command_targeter_standalone.h
@@ -43,7 +43,7 @@ public:
ConnectionString connectionString() override;
- StatusWith<HostAndPort> findHost(OperationContext* txn,
+ StatusWith<HostAndPort> findHost(OperationContext* opCtx,
const ReadPreferenceSetting& readPref) override;
StatusWith<HostAndPort> findHostWithMaxWait(const ReadPreferenceSetting& readPref,
diff --git a/src/mongo/db/assemble_response.cpp b/src/mongo/db/assemble_response.cpp
index 4c6121dfb2b..a4f21cc5ae2 100644
--- a/src/mongo/db/assemble_response.cpp
+++ b/src/mongo/db/assemble_response.cpp
@@ -136,14 +136,14 @@ void generateLegacyQueryErrorResponse(const AssertionException* exception,
/**
* Fills out CurOp / OpDebug with basic command info.
*/
-void beginCommandOp(OperationContext* txn, const NamespaceString& nss, const BSONObj& queryObj) {
- auto curop = CurOp::get(txn);
- stdx::lock_guard<Client> lk(*txn->getClient());
+void beginCommandOp(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& queryObj) {
+ auto curop = CurOp::get(opCtx);
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curop->setQuery_inlock(queryObj);
curop->setNS_inlock(nss.ns());
}
-void receivedCommand(OperationContext* txn,
+void receivedCommand(OperationContext* opCtx,
const NamespaceString& nss,
Client& client,
DbResponse& dbResponse,
@@ -155,7 +155,7 @@ void receivedCommand(OperationContext* txn,
DbMessage dbMessage(message);
QueryMessage queryMessage(dbMessage);
- CurOp* op = CurOp::get(txn);
+ CurOp* op = CurOp::get(opCtx);
rpc::LegacyReplyBuilder builder{};
@@ -165,10 +165,10 @@ void receivedCommand(OperationContext* txn,
// Auth checking for Commands happens later.
int nToReturn = queryMessage.ntoreturn;
- beginCommandOp(txn, nss, request.getCommandArgs());
+ beginCommandOp(opCtx, nss, request.getCommandArgs());
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
op->markCommand_inlock();
}
@@ -177,11 +177,11 @@ void receivedCommand(OperationContext* txn,
<< ") for $cmd type ns - can only be 1 or -1",
nToReturn == 1 || nToReturn == -1);
- runCommands(txn, request, &builder);
+ runCommands(opCtx, request, &builder);
op->debug().iscommand = true;
} catch (const DBException& exception) {
- Command::generateErrorResponse(txn, &builder, exception);
+ Command::generateErrorResponse(opCtx, &builder, exception);
}
auto response = builder.done();
@@ -192,14 +192,17 @@ void receivedCommand(OperationContext* txn,
dbResponse.responseToMsgId = responseToMsgId;
}
-void receivedRpc(OperationContext* txn, Client& client, DbResponse& dbResponse, Message& message) {
+void receivedRpc(OperationContext* opCtx,
+ Client& client,
+ DbResponse& dbResponse,
+ Message& message) {
invariant(message.operation() == dbCommand);
const int32_t responseToMsgId = message.header().getId();
rpc::CommandReplyBuilder replyBuilder{};
- auto curOp = CurOp::get(txn);
+ auto curOp = CurOp::get(opCtx);
try {
// database is validated here
@@ -208,18 +211,18 @@ void receivedRpc(OperationContext* txn, Client& client, DbResponse& dbResponse,
// We construct a legacy $cmd namespace so we can fill in curOp using
// the existing logic that existed for OP_QUERY commands
NamespaceString nss(request.getDatabase(), "$cmd");
- beginCommandOp(txn, nss, request.getCommandArgs());
+ beginCommandOp(opCtx, nss, request.getCommandArgs());
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->markCommand_inlock();
}
- runCommands(txn, request, &replyBuilder);
+ runCommands(opCtx, request, &replyBuilder);
curOp->debug().iscommand = true;
} catch (const DBException& exception) {
- Command::generateErrorResponse(txn, &replyBuilder, exception);
+ Command::generateErrorResponse(opCtx, &replyBuilder, exception);
}
auto response = replyBuilder.done();
@@ -234,7 +237,7 @@ void receivedRpc(OperationContext* txn, Client& client, DbResponse& dbResponse,
// as ordinary commands. To support old clients for another release, this helper serves
// to execute the real command from the legacy pseudo-command codepath.
// TODO: remove after MongoDB 3.2 is released
-void receivedPseudoCommand(OperationContext* txn,
+void receivedPseudoCommand(OperationContext* opCtx,
Client& client,
DbResponse& dbResponse,
Message& message,
@@ -285,10 +288,10 @@ void receivedPseudoCommand(OperationContext* txn,
interposed.setData(dbQuery, cmdMsgBuf.buf(), cmdMsgBuf.len());
interposed.header().setId(message.header().getId());
- receivedCommand(txn, interposedNss, client, dbResponse, interposed);
+ receivedCommand(opCtx, interposedNss, client, dbResponse, interposed);
}
-void receivedQuery(OperationContext* txn,
+void receivedQuery(OperationContext* opCtx,
const NamespaceString& nss,
Client& c,
DbResponse& dbResponse,
@@ -301,21 +304,21 @@ void receivedQuery(OperationContext* txn,
DbMessage d(m);
QueryMessage q(d);
- CurOp& op = *CurOp::get(txn);
+ CurOp& op = *CurOp::get(opCtx);
try {
- Client* client = txn->getClient();
+ Client* client = opCtx->getClient();
Status status = AuthorizationSession::get(client)->checkAuthForFind(nss, false);
audit::logQueryAuthzCheck(client, nss, q.query, status.code());
uassertStatusOK(status);
- dbResponse.exhaustNS = runQuery(txn, q, nss, dbResponse.response);
+ dbResponse.exhaustNS = runQuery(opCtx, q, nss, dbResponse.response);
} catch (const AssertionException& e) {
// If we got a stale config, wait in case the operation is stuck in a critical section
if (e.getCode() == ErrorCodes::SendStaleConfig) {
auto& sce = static_cast<const StaleConfigException&>(e);
- ShardingState::get(txn)->onStaleShardVersion(
- txn, NamespaceString(sce.getns()), sce.getVersionReceived());
+ ShardingState::get(opCtx)->onStaleShardVersion(
+ opCtx, NamespaceString(sce.getns()), sce.getVersionReceived());
}
dbResponse.response.reset();
@@ -326,8 +329,8 @@ void receivedQuery(OperationContext* txn,
dbResponse.responseToMsgId = responseToMsgId;
}
-void receivedKillCursors(OperationContext* txn, Message& m) {
- LastError::get(txn->getClient()).disable();
+void receivedKillCursors(OperationContext* opCtx, Message& m) {
+ LastError::get(opCtx->getClient()).disable();
DbMessage dbmessage(m);
int n = dbmessage.pullInt();
@@ -344,35 +347,35 @@ void receivedKillCursors(OperationContext* txn, Message& m) {
const char* cursorArray = dbmessage.getArray(n);
- int found = CursorManager::eraseCursorGlobalIfAuthorized(txn, n, cursorArray);
+ int found = CursorManager::eraseCursorGlobalIfAuthorized(opCtx, n, cursorArray);
if (shouldLog(logger::LogSeverity::Debug(1)) || found != n) {
LOG(found == n ? 1 : 0) << "killcursors: found " << found << " of " << n;
}
}
-void receivedInsert(OperationContext* txn, const NamespaceString& nsString, Message& m) {
+void receivedInsert(OperationContext* opCtx, const NamespaceString& nsString, Message& m) {
auto insertOp = parseLegacyInsert(m);
invariant(insertOp.ns == nsString);
for (const auto& obj : insertOp.documents) {
Status status =
- AuthorizationSession::get(txn->getClient())->checkAuthForInsert(txn, nsString, obj);
- audit::logInsertAuthzCheck(txn->getClient(), nsString, obj, status.code());
+ AuthorizationSession::get(opCtx->getClient())->checkAuthForInsert(opCtx, nsString, obj);
+ audit::logInsertAuthzCheck(opCtx->getClient(), nsString, obj, status.code());
uassertStatusOK(status);
}
- performInserts(txn, insertOp);
+ performInserts(opCtx, insertOp);
}
-void receivedUpdate(OperationContext* txn, const NamespaceString& nsString, Message& m) {
+void receivedUpdate(OperationContext* opCtx, const NamespaceString& nsString, Message& m) {
auto updateOp = parseLegacyUpdate(m);
auto& singleUpdate = updateOp.updates[0];
invariant(updateOp.ns == nsString);
Status status =
- AuthorizationSession::get(txn->getClient())
+ AuthorizationSession::get(opCtx->getClient())
->checkAuthForUpdate(
- txn, nsString, singleUpdate.query, singleUpdate.update, singleUpdate.upsert);
- audit::logUpdateAuthzCheck(txn->getClient(),
+ opCtx, nsString, singleUpdate.query, singleUpdate.update, singleUpdate.upsert);
+ audit::logUpdateAuthzCheck(opCtx->getClient(),
nsString,
singleUpdate.query,
singleUpdate.update,
@@ -381,23 +384,23 @@ void receivedUpdate(OperationContext* txn, const NamespaceString& nsString, Mess
status.code());
uassertStatusOK(status);
- performUpdates(txn, updateOp);
+ performUpdates(opCtx, updateOp);
}
-void receivedDelete(OperationContext* txn, const NamespaceString& nsString, Message& m) {
+void receivedDelete(OperationContext* opCtx, const NamespaceString& nsString, Message& m) {
auto deleteOp = parseLegacyDelete(m);
auto& singleDelete = deleteOp.deletes[0];
invariant(deleteOp.ns == nsString);
- Status status = AuthorizationSession::get(txn->getClient())
- ->checkAuthForDelete(txn, nsString, singleDelete.query);
- audit::logDeleteAuthzCheck(txn->getClient(), nsString, singleDelete.query, status.code());
+ Status status = AuthorizationSession::get(opCtx->getClient())
+ ->checkAuthForDelete(opCtx, nsString, singleDelete.query);
+ audit::logDeleteAuthzCheck(opCtx->getClient(), nsString, singleDelete.query, status.code());
uassertStatusOK(status);
- performDeletes(txn, deleteOp);
+ performDeletes(opCtx, deleteOp);
}
-bool receivedGetMore(OperationContext* txn, DbResponse& dbresponse, Message& m, CurOp& curop) {
+bool receivedGetMore(OperationContext* opCtx, DbResponse& dbresponse, Message& m, CurOp& curop) {
globalOpCounters.gotGetMore();
DbMessage d(m);
@@ -411,8 +414,8 @@ bool receivedGetMore(OperationContext* txn, DbResponse& dbresponse, Message& m,
curop.debug().cursorid = cursorid;
{
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setNS_inlock(ns);
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setNS_inlock(ns);
}
bool exhaust = false;
@@ -424,16 +427,17 @@ bool receivedGetMore(OperationContext* txn, DbResponse& dbresponse, Message& m,
str::stream() << "Invalid ns [" << ns << "]",
nsString.isValid());
- Status status = AuthorizationSession::get(txn->getClient())
+ Status status = AuthorizationSession::get(opCtx->getClient())
->checkAuthForGetMore(nsString, cursorid, false);
- audit::logGetMoreAuthzCheck(txn->getClient(), nsString, cursorid, status.code());
+ audit::logGetMoreAuthzCheck(opCtx->getClient(), nsString, cursorid, status.code());
uassertStatusOK(status);
while (MONGO_FAIL_POINT(rsStopGetMore)) {
sleepmillis(0);
}
- dbresponse.response = getMore(txn, ns, ntoreturn, cursorid, &exhaust, &isCursorAuthorized);
+ dbresponse.response =
+ getMore(opCtx, ns, ntoreturn, cursorid, &exhaust, &isCursorAuthorized);
} catch (AssertionException& e) {
if (isCursorAuthorized) {
// If a cursor with id 'cursorid' was authorized, it may have been advanced
@@ -441,7 +445,7 @@ bool receivedGetMore(OperationContext* txn, DbResponse& dbresponse, Message& m,
// because it may now be out of sync with the client's iteration state.
// SERVER-7952
// TODO Temporary code, see SERVER-4563 for a cleanup overview.
- CursorManager::eraseCursorGlobal(txn, cursorid);
+ CursorManager::eraseCursorGlobal(opCtx, cursorid);
}
BSONObjBuilder err;
@@ -473,7 +477,7 @@ bool receivedGetMore(OperationContext* txn, DbResponse& dbresponse, Message& m,
} // namespace
// Returns false when request includes 'end'
-void assembleResponse(OperationContext* txn,
+void assembleResponse(OperationContext* opCtx,
Message& m,
DbResponse& dbresponse,
const HostAndPort& remote) {
@@ -483,15 +487,15 @@ void assembleResponse(OperationContext* txn,
DbMessage dbmsg(m);
- Client& c = *txn->getClient();
+ Client& c = *opCtx->getClient();
if (c.isInDirectClient()) {
- invariant(!txn->lockState()->inAWriteUnitOfWork());
+ invariant(!opCtx->lockState()->inAWriteUnitOfWork());
} else {
LastError::get(c).startRequest();
- AuthorizationSession::get(c)->startRequest(txn);
+ AuthorizationSession::get(c)->startRequest(opCtx);
// We should not be holding any locks at this point
- invariant(!txn->lockState()->isLocked());
+ invariant(!opCtx->lockState()->isLocked());
}
const char* ns = dbmsg.messageShouldHaveNs() ? dbmsg.getns() : NULL;
@@ -507,15 +511,15 @@ void assembleResponse(OperationContext* txn,
opwrite(m);
if (nsString.coll() == "$cmd.sys.inprog") {
- receivedPseudoCommand(txn, c, dbresponse, m, "currentOp");
+ receivedPseudoCommand(opCtx, c, dbresponse, m, "currentOp");
return;
}
if (nsString.coll() == "$cmd.sys.killop") {
- receivedPseudoCommand(txn, c, dbresponse, m, "killOp");
+ receivedPseudoCommand(opCtx, c, dbresponse, m, "killOp");
return;
}
if (nsString.coll() == "$cmd.sys.unlock") {
- receivedPseudoCommand(txn, c, dbresponse, m, "fsyncUnlock");
+ receivedPseudoCommand(opCtx, c, dbresponse, m, "fsyncUnlock");
return;
}
} else {
@@ -530,9 +534,9 @@ void assembleResponse(OperationContext* txn,
opwrite(m);
}
- CurOp& currentOp = *CurOp::get(txn);
+ CurOp& currentOp = *CurOp::get(opCtx);
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
// Commands handling code will reset this if the operation is a command
// which is logically a basic CRUD operation like query, insert, etc.
currentOp.setNetworkOp_inlock(op);
@@ -546,14 +550,14 @@ void assembleResponse(OperationContext* txn,
if (op == dbQuery) {
if (isCommand) {
- receivedCommand(txn, nsString, c, dbresponse, m);
+ receivedCommand(opCtx, nsString, c, dbresponse, m);
} else {
- receivedQuery(txn, nsString, c, dbresponse, m);
+ receivedQuery(opCtx, nsString, c, dbresponse, m);
}
} else if (op == dbCommand) {
- receivedRpc(txn, c, dbresponse, m);
+ receivedRpc(opCtx, c, dbresponse, m);
} else if (op == dbGetMore) {
- if (!receivedGetMore(txn, dbresponse, m, currentOp))
+ if (!receivedGetMore(opCtx, dbresponse, m, currentOp))
shouldLogOpDebug = true;
} else if (op == dbMsg) {
// deprecated - replaced by commands
@@ -575,7 +579,7 @@ void assembleResponse(OperationContext* txn,
if (op == dbKillCursors) {
currentOp.ensureStarted();
logThresholdMs = 10;
- receivedKillCursors(txn, m);
+ receivedKillCursors(opCtx, m);
} else if (op != dbInsert && op != dbUpdate && op != dbDelete) {
log() << " operation isn't supported: " << static_cast<int>(op);
currentOp.done();
@@ -597,11 +601,11 @@ void assembleResponse(OperationContext* txn,
if (!nsString.isValid()) {
uassert(16257, str::stream() << "Invalid ns [" << ns << "]", false);
} else if (op == dbInsert) {
- receivedInsert(txn, nsString, m);
+ receivedInsert(opCtx, nsString, m);
} else if (op == dbUpdate) {
- receivedUpdate(txn, nsString, m);
+ receivedUpdate(opCtx, nsString, m);
} else if (op == dbDelete) {
- receivedDelete(txn, nsString, m);
+ receivedDelete(opCtx, nsString, m);
} else {
invariant(false);
}
@@ -624,9 +628,9 @@ void assembleResponse(OperationContext* txn,
debug.executionTimeMicros = currentOp.totalTimeMicros();
logThresholdMs += currentOp.getExpectedLatencyMs();
- Top::get(txn->getServiceContext())
+ Top::get(opCtx->getServiceContext())
.incrementGlobalLatencyStats(
- txn, currentOp.totalTimeMicros(), currentOp.getReadWriteType());
+ opCtx, currentOp.totalTimeMicros(), currentOp.getReadWriteType());
const bool shouldSample = serverGlobalParams.sampleRate == 1.0
? true
@@ -634,13 +638,13 @@ void assembleResponse(OperationContext* txn,
if (shouldLogOpDebug || (shouldSample && debug.executionTimeMicros > logThresholdMs * 1000LL)) {
Locker::LockerInfo lockerInfo;
- txn->lockState()->getLockerInfo(&lockerInfo);
+ opCtx->lockState()->getLockerInfo(&lockerInfo);
log() << debug.report(&c, currentOp, lockerInfo.stats);
}
if (shouldSample && currentOp.shouldDBProfile()) {
// Performance profiling is on
- if (txn->lockState()->isReadLocked()) {
+ if (opCtx->lockState()->isReadLocked()) {
LOG(1) << "note: not profiling because recursive read lock";
} else if (lockedForWriting()) {
// TODO SERVER-26825: Fix race condition where fsyncLock is acquired post
@@ -649,11 +653,11 @@ void assembleResponse(OperationContext* txn,
} else if (storageGlobalParams.readOnly) {
LOG(1) << "note: not profiling because server is read-only";
} else {
- profile(txn, op);
+ profile(opCtx, op);
}
}
- recordCurOpMetrics(txn);
+ recordCurOpMetrics(opCtx);
}
} // namespace mongo
diff --git a/src/mongo/db/assemble_response.h b/src/mongo/db/assemble_response.h
index c8c71fd9aad..9a50dd52dd3 100644
--- a/src/mongo/db/assemble_response.h
+++ b/src/mongo/db/assemble_response.h
@@ -40,7 +40,7 @@ class OperationContext;
// to indicate that the call is on behalf of a DBDirectClient.
extern const HostAndPort kHostAndPortForDirectClient;
-void assembleResponse(OperationContext* txn,
+void assembleResponse(OperationContext* opCtx,
Message& m,
DbResponse& dbresponse,
const HostAndPort& client);
diff --git a/src/mongo/db/audit.cpp b/src/mongo/db/audit.cpp
index a7108d3ee3f..dce575b621f 100644
--- a/src/mongo/db/audit.cpp
+++ b/src/mongo/db/audit.cpp
@@ -172,7 +172,7 @@ void logAuthentication(Client* client,
void logShardCollection(Client* client, StringData ns, const BSONObj& keyPattern, bool unique)
MONGO_AUDIT_STUB
- void writeImpersonatedUsersToMetadata(OperationContext* txn,
+ void writeImpersonatedUsersToMetadata(OperationContext* opCtx,
BSONObjBuilder* metadata) MONGO_AUDIT_STUB
void parseAndRemoveImpersonatedUsersField(BSONObj cmdObj,
diff --git a/src/mongo/db/audit.h b/src/mongo/db/audit.h
index e17fc99bd46..cbd77c85acf 100644
--- a/src/mongo/db/audit.h
+++ b/src/mongo/db/audit.h
@@ -299,7 +299,7 @@ void logShardCollection(Client* client, StringData ns, const BSONObj& keyPattern
* to the provided metadata builder. The users and roles are extracted from the current client.
* They are to be the impersonated users and roles for a Command run by an internal user.
*/
-void writeImpersonatedUsersToMetadata(OperationContext* txn, BSONObjBuilder* metadataBob);
+void writeImpersonatedUsersToMetadata(OperationContext* opCtx, BSONObjBuilder* metadataBob);
/*
* Looks for an 'impersonatedUsers' field. This field is used by mongos to
diff --git a/src/mongo/db/auth/auth_index_d.cpp b/src/mongo/db/auth/auth_index_d.cpp
index 427900c8377..0fe036e03d5 100644
--- a/src/mongo/db/auth/auth_index_d.cpp
+++ b/src/mongo/db/auth/auth_index_d.cpp
@@ -81,12 +81,12 @@ MONGO_INITIALIZER(AuthIndexKeyPatterns)(InitializerContext*) {
} // namespace
-Status verifySystemIndexes(OperationContext* txn) {
+Status verifySystemIndexes(OperationContext* opCtx) {
const NamespaceString systemUsers = AuthorizationManager::usersCollectionNamespace;
// Make sure the old unique index from v2.4 on system.users doesn't exist.
- ScopedTransaction scopedXact(txn, MODE_IX);
- AutoGetDb autoDb(txn, systemUsers.db(), MODE_X);
+ ScopedTransaction scopedXact(opCtx, MODE_IX);
+ AutoGetDb autoDb(opCtx, systemUsers.db(), MODE_X);
if (!autoDb.getDb()) {
return Status::OK();
}
@@ -98,7 +98,7 @@ Status verifySystemIndexes(OperationContext* txn) {
IndexCatalog* indexCatalog = collection->getIndexCatalog();
std::vector<IndexDescriptor*> indexes;
- indexCatalog->findIndexesByKeyPattern(txn, v1SystemUsersKeyPattern, false, &indexes);
+ indexCatalog->findIndexesByKeyPattern(opCtx, v1SystemUsersKeyPattern, false, &indexes);
if (indexCatalog && !indexes.empty()) {
fassert(ErrorCodes::AmbiguousIndexKeyPattern, indexes.size() == 1);
@@ -111,19 +111,19 @@ Status verifySystemIndexes(OperationContext* txn) {
return Status::OK();
}
-void createSystemIndexes(OperationContext* txn, Collection* collection) {
+void createSystemIndexes(OperationContext* opCtx, Collection* collection) {
invariant(collection);
const NamespaceString& ns = collection->ns();
if (ns == AuthorizationManager::usersCollectionNamespace) {
collection->getIndexCatalog()->createIndexOnEmptyCollection(
- txn,
+ opCtx,
BSON("name" << v3SystemUsersIndexName << "ns" << collection->ns().ns() << "key"
<< v3SystemUsersKeyPattern
<< "unique"
<< true));
} else if (ns == AuthorizationManager::rolesCollectionNamespace) {
collection->getIndexCatalog()->createIndexOnEmptyCollection(
- txn,
+ opCtx,
BSON("name" << v3SystemRolesIndexName << "ns" << collection->ns().ns() << "key"
<< v3SystemRolesKeyPattern
<< "unique"
diff --git a/src/mongo/db/auth/auth_index_d.h b/src/mongo/db/auth/auth_index_d.h
index 9b85e02e000..6b2be592489 100644
--- a/src/mongo/db/auth/auth_index_d.h
+++ b/src/mongo/db/auth/auth_index_d.h
@@ -41,13 +41,13 @@ namespace authindex {
* Creates the appropriate indexes on _new_ system collections supporting authentication and
* authorization.
*/
-void createSystemIndexes(OperationContext* txn, Collection* collection);
+void createSystemIndexes(OperationContext* opCtx, Collection* collection);
/**
* Verifies that only the appropriate indexes to support authentication and authorization
* are present in the admin database
*/
-Status verifySystemIndexes(OperationContext* txn);
+Status verifySystemIndexes(OperationContext* opCtx);
} // namespace authindex
} // namespace mongo
diff --git a/src/mongo/db/auth/authorization_manager.cpp b/src/mongo/db/auth/authorization_manager.cpp
index df560952d68..bca15e13223 100644
--- a/src/mongo/db/auth/authorization_manager.cpp
+++ b/src/mongo/db/auth/authorization_manager.cpp
@@ -274,14 +274,14 @@ bool AuthorizationManager::shouldValidateAuthSchemaOnStartup() {
return _startupAuthSchemaValidation;
}
-Status AuthorizationManager::getAuthorizationVersion(OperationContext* txn, int* version) {
+Status AuthorizationManager::getAuthorizationVersion(OperationContext* opCtx, int* version) {
CacheGuard guard(this, CacheGuard::fetchSynchronizationManual);
int newVersion = _version;
if (schemaVersionInvalid == newVersion) {
while (guard.otherUpdateInFetchPhase())
guard.wait();
guard.beginFetchPhase();
- Status status = _externalState->getStoredAuthorizationVersion(txn, &newVersion);
+ Status status = _externalState->getStoredAuthorizationVersion(opCtx, &newVersion);
guard.endFetchPhase();
if (!status.isOK()) {
warning() << "Problem fetching the stored schema version of authorization data: "
@@ -311,7 +311,7 @@ bool AuthorizationManager::isAuthEnabled() const {
return _authEnabled;
}
-bool AuthorizationManager::hasAnyPrivilegeDocuments(OperationContext* txn) {
+bool AuthorizationManager::hasAnyPrivilegeDocuments(OperationContext* opCtx) {
stdx::unique_lock<stdx::mutex> lk(_privilegeDocsExistMutex);
if (_privilegeDocsExist) {
// If we know that a user exists, don't re-check.
@@ -319,7 +319,7 @@ bool AuthorizationManager::hasAnyPrivilegeDocuments(OperationContext* txn) {
}
lk.unlock();
- bool privDocsExist = _externalState->hasAnyPrivilegeDocuments(txn);
+ bool privDocsExist = _externalState->hasAnyPrivilegeDocuments(opCtx);
lk.lock();
if (privDocsExist) {
@@ -414,37 +414,37 @@ Status AuthorizationManager::_initializeUserFromPrivilegeDocument(User* user,
return Status::OK();
}
-Status AuthorizationManager::getUserDescription(OperationContext* txn,
+Status AuthorizationManager::getUserDescription(OperationContext* opCtx,
const UserName& userName,
BSONObj* result) {
- return _externalState->getUserDescription(txn, userName, result);
+ return _externalState->getUserDescription(opCtx, userName, result);
}
-Status AuthorizationManager::getRoleDescription(OperationContext* txn,
+Status AuthorizationManager::getRoleDescription(OperationContext* opCtx,
const RoleName& roleName,
PrivilegeFormat privileges,
BSONObj* result) {
- return _externalState->getRoleDescription(txn, roleName, privileges, result);
+ return _externalState->getRoleDescription(opCtx, roleName, privileges, result);
}
-Status AuthorizationManager::getRolesDescription(OperationContext* txn,
+Status AuthorizationManager::getRolesDescription(OperationContext* opCtx,
const std::vector<RoleName>& roleName,
PrivilegeFormat privileges,
BSONObj* result) {
- return _externalState->getRolesDescription(txn, roleName, privileges, result);
+ return _externalState->getRolesDescription(opCtx, roleName, privileges, result);
}
-Status AuthorizationManager::getRoleDescriptionsForDB(OperationContext* txn,
+Status AuthorizationManager::getRoleDescriptionsForDB(OperationContext* opCtx,
const std::string dbname,
PrivilegeFormat privileges,
bool showBuiltinRoles,
vector<BSONObj>* result) {
return _externalState->getRoleDescriptionsForDB(
- txn, dbname, privileges, showBuiltinRoles, result);
+ opCtx, dbname, privileges, showBuiltinRoles, result);
}
-Status AuthorizationManager::acquireUser(OperationContext* txn,
+Status AuthorizationManager::acquireUser(OperationContext* opCtx,
const UserName& userName,
User** acquiredUser) {
if (userName == internalSecurity.user->getName()) {
@@ -481,7 +481,7 @@ Status AuthorizationManager::acquireUser(OperationContext* txn,
Status status = Status::OK();
for (int i = 0; i < maxAcquireRetries; ++i) {
if (authzVersion == schemaVersionInvalid) {
- Status status = _externalState->getStoredAuthorizationVersion(txn, &authzVersion);
+ Status status = _externalState->getStoredAuthorizationVersion(opCtx, &authzVersion);
if (!status.isOK())
return status;
}
@@ -496,7 +496,7 @@ Status AuthorizationManager::acquireUser(OperationContext* txn,
case schemaVersion28SCRAM:
case schemaVersion26Final:
case schemaVersion26Upgrade:
- status = _fetchUserV2(txn, userName, &user);
+ status = _fetchUserV2(opCtx, userName, &user);
break;
case schemaVersion24:
status = Status(ErrorCodes::AuthSchemaIncompatible,
@@ -535,11 +535,11 @@ Status AuthorizationManager::acquireUser(OperationContext* txn,
return Status::OK();
}
-Status AuthorizationManager::_fetchUserV2(OperationContext* txn,
+Status AuthorizationManager::_fetchUserV2(OperationContext* opCtx,
const UserName& userName,
std::unique_ptr<User>* acquiredUser) {
BSONObj userObj;
- Status status = getUserDescription(txn, userName, &userObj);
+ Status status = getUserDescription(opCtx, userName, &userObj);
if (!status.isOK()) {
return status;
}
@@ -619,9 +619,9 @@ void AuthorizationManager::_invalidateUserCache_inlock() {
_version = schemaVersionInvalid;
}
-Status AuthorizationManager::initialize(OperationContext* txn) {
+Status AuthorizationManager::initialize(OperationContext* opCtx) {
invalidateUserCache();
- Status status = _externalState->initialize(txn);
+ Status status = _externalState->initialize(opCtx);
if (!status.isOK())
return status;
@@ -733,9 +733,9 @@ void AuthorizationManager::_invalidateRelevantCacheData(const char* op,
}
void AuthorizationManager::logOp(
- OperationContext* txn, const char* op, const char* ns, const BSONObj& o, const BSONObj* o2) {
+ OperationContext* opCtx, const char* op, const char* ns, const BSONObj& o, const BSONObj* o2) {
if (appliesToAuthzData(op, ns, o)) {
- _externalState->logOp(txn, op, ns, o, o2);
+ _externalState->logOp(opCtx, op, ns, o, o2);
_invalidateRelevantCacheData(op, ns, o, o2);
}
}
diff --git a/src/mongo/db/auth/authorization_manager.h b/src/mongo/db/auth/authorization_manager.h
index b12abccad40..2532bd5217e 100644
--- a/src/mongo/db/auth/authorization_manager.h
+++ b/src/mongo/db/auth/authorization_manager.h
@@ -192,7 +192,7 @@ public:
* returns a non-OK status. When returning a non-OK status, *version will be set to
* schemaVersionInvalid (0).
*/
- Status getAuthorizationVersion(OperationContext* txn, int* version);
+ Status getAuthorizationVersion(OperationContext* opCtx, int* version);
/**
* Returns the user cache generation identifier.
@@ -207,7 +207,7 @@ public:
* meaning that once this method returns true it will continue to return true for the
* lifetime of this process, even if all users are subsequently dropped from the system.
*/
- bool hasAnyPrivilegeDocuments(OperationContext* txn);
+ bool hasAnyPrivilegeDocuments(OperationContext* opCtx);
// Checks to see if "doc" is a valid privilege document, assuming it is stored in the
// "system.users" collection of database "dbname".
@@ -222,12 +222,12 @@ public:
/**
* Delegates method call to the underlying AuthzManagerExternalState.
*/
- Status getUserDescription(OperationContext* txn, const UserName& userName, BSONObj* result);
+ Status getUserDescription(OperationContext* opCtx, const UserName& userName, BSONObj* result);
/**
* Delegates method call to the underlying AuthzManagerExternalState.
*/
- Status getRoleDescription(OperationContext* txn,
+ Status getRoleDescription(OperationContext* opCtx,
const RoleName& roleName,
PrivilegeFormat privilegeFormat,
BSONObj* result);
@@ -235,7 +235,7 @@ public:
/**
* Delegates method call to the underlying AuthzManagerExternalState.
*/
- Status getRolesDescription(OperationContext* txn,
+ Status getRolesDescription(OperationContext* opCtx,
const std::vector<RoleName>& roleName,
PrivilegeFormat privilegeFormat,
BSONObj* result);
@@ -243,7 +243,7 @@ public:
/**
* Delegates method call to the underlying AuthzManagerExternalState.
*/
- Status getRoleDescriptionsForDB(OperationContext* txn,
+ Status getRoleDescriptionsForDB(OperationContext* opCtx,
const std::string dbname,
PrivilegeFormat privilegeFormat,
bool showBuiltinRoles,
@@ -259,7 +259,7 @@ public:
* The AuthorizationManager retains ownership of the returned User object.
* On non-OK Status return values, acquiredUser will not be modified.
*/
- Status acquireUser(OperationContext* txn, const UserName& userName, User** acquiredUser);
+ Status acquireUser(OperationContext* opCtx, const UserName& userName, User** acquiredUser);
/**
* Decrements the refcount of the given User object. If the refcount has gone to zero,
@@ -282,7 +282,7 @@ public:
* system is at, this may involve building up the user cache and/or the roles graph.
* Call this function at startup and after resynchronizing a slave/secondary.
*/
- Status initialize(OperationContext* txn);
+ Status initialize(OperationContext* opCtx);
/**
* Invalidates all of the contents of the user cache.
@@ -301,7 +301,7 @@ public:
* Hook called by replication code to let the AuthorizationManager observe changes
* to relevant collections.
*/
- void logOp(OperationContext* txn,
+ void logOp(OperationContext* opCtx,
const char* opstr,
const char* ns,
const BSONObj& obj,
@@ -339,7 +339,7 @@ private:
* Fetches user information from a v2-schema user document for the named user,
* and stores a pointer to a new user object into *acquiredUser on success.
*/
- Status _fetchUserV2(OperationContext* txn,
+ Status _fetchUserV2(OperationContext* opCtx,
const UserName& userName,
std::unique_ptr<User>* acquiredUser);
diff --git a/src/mongo/db/auth/authorization_manager_global.cpp b/src/mongo/db/auth/authorization_manager_global.cpp
index f5f2ab8bffc..3172d7e1626 100644
--- a/src/mongo/db/auth/authorization_manager_global.cpp
+++ b/src/mongo/db/auth/authorization_manager_global.cpp
@@ -46,7 +46,7 @@ class AuthzVersionParameter : public ServerParameter {
public:
AuthzVersionParameter(ServerParameterSet* sps, const std::string& name);
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name);
+ virtual void append(OperationContext* opCtx, BSONObjBuilder& b, const std::string& name);
virtual Status set(const BSONElement& newValueElement);
virtual Status setFromString(const std::string& str);
};
@@ -62,11 +62,11 @@ MONGO_INITIALIZER_GENERAL(AuthzSchemaParameter,
AuthzVersionParameter::AuthzVersionParameter(ServerParameterSet* sps, const std::string& name)
: ServerParameter(sps, name, false, false) {}
-void AuthzVersionParameter::append(OperationContext* txn,
+void AuthzVersionParameter::append(OperationContext* opCtx,
BSONObjBuilder& b,
const std::string& name) {
int authzVersion;
- uassertStatusOK(getGlobalAuthorizationManager()->getAuthorizationVersion(txn, &authzVersion));
+ uassertStatusOK(getGlobalAuthorizationManager()->getAuthorizationVersion(opCtx, &authzVersion));
b.append(name, authzVersion);
}
diff --git a/src/mongo/db/auth/authorization_manager_test.cpp b/src/mongo/db/auth/authorization_manager_test.cpp
index 144f43d2735..83edae4ddef 100644
--- a/src/mongo/db/auth/authorization_manager_test.cpp
+++ b/src/mongo/db/auth/authorization_manager_test.cpp
@@ -182,9 +182,9 @@ public:
};
TEST_F(AuthorizationManagerTest, testAcquireV2User) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
- ASSERT_OK(externalState->insertPrivilegeDocument(&txn,
+ ASSERT_OK(externalState->insertPrivilegeDocument(&opCtx,
BSON("_id"
<< "admin.v2read"
<< "user"
@@ -200,7 +200,7 @@ TEST_F(AuthorizationManagerTest, testAcquireV2User) {
<< "db"
<< "test"))),
BSONObj()));
- ASSERT_OK(externalState->insertPrivilegeDocument(&txn,
+ ASSERT_OK(externalState->insertPrivilegeDocument(&opCtx,
BSON("_id"
<< "admin.v2cluster"
<< "user"
@@ -218,7 +218,7 @@ TEST_F(AuthorizationManagerTest, testAcquireV2User) {
BSONObj()));
User* v2read;
- ASSERT_OK(authzManager->acquireUser(&txn, UserName("v2read", "test"), &v2read));
+ ASSERT_OK(authzManager->acquireUser(&opCtx, UserName("v2read", "test"), &v2read));
ASSERT_EQUALS(UserName("v2read", "test"), v2read->getName());
ASSERT(v2read->isValid());
ASSERT_EQUALS(1U, v2read->getRefCount());
@@ -232,7 +232,7 @@ TEST_F(AuthorizationManagerTest, testAcquireV2User) {
authzManager->releaseUser(v2read);
User* v2cluster;
- ASSERT_OK(authzManager->acquireUser(&txn, UserName("v2cluster", "admin"), &v2cluster));
+ ASSERT_OK(authzManager->acquireUser(&opCtx, UserName("v2cluster", "admin"), &v2cluster));
ASSERT_EQUALS(UserName("v2cluster", "admin"), v2cluster->getName());
ASSERT(v2cluster->isValid());
ASSERT_EQUALS(1U, v2cluster->getRefCount());
@@ -254,11 +254,11 @@ TEST_F(AuthorizationManagerTest, testLocalX509Authorization) {
session,
SSLPeerInfo("CN=mongodb.com", {RoleName("read", "test"), RoleName("readWrite", "test")}));
ServiceContext::UniqueClient client = serviceContext.makeClient("testClient", session);
- ServiceContext::UniqueOperationContext txn = client->makeOperationContext();
+ ServiceContext::UniqueOperationContext opCtx = client->makeOperationContext();
User* x509User;
ASSERT_OK(
- authzManager->acquireUser(txn.get(), UserName("CN=mongodb.com", "$external"), &x509User));
+ authzManager->acquireUser(opCtx.get(), UserName("CN=mongodb.com", "$external"), &x509User));
ASSERT(x509User->isValid());
stdx::unordered_set<RoleName> expectedRoles{RoleName("read", "test"),
@@ -288,11 +288,11 @@ TEST_F(AuthorizationManagerTest, testLocalX509AuthorizationInvalidUser) {
session,
SSLPeerInfo("CN=mongodb.com", {RoleName("read", "test"), RoleName("write", "test")}));
ServiceContext::UniqueClient client = serviceContext.makeClient("testClient", session);
- ServiceContext::UniqueOperationContext txn = client->makeOperationContext();
+ ServiceContext::UniqueOperationContext opCtx = client->makeOperationContext();
User* x509User;
ASSERT_NOT_OK(
- authzManager->acquireUser(txn.get(), UserName("CN=10gen.com", "$external"), &x509User));
+ authzManager->acquireUser(opCtx.get(), UserName("CN=10gen.com", "$external"), &x509User));
}
TEST_F(AuthorizationManagerTest, testLocalX509AuthenticationNoAuthorization) {
@@ -301,11 +301,11 @@ TEST_F(AuthorizationManagerTest, testLocalX509AuthenticationNoAuthorization) {
transport::SessionHandle session = transportLayer.createSession();
setX509PeerInfo(session, {});
ServiceContext::UniqueClient client = serviceContext.makeClient("testClient", session);
- ServiceContext::UniqueOperationContext txn = client->makeOperationContext();
+ ServiceContext::UniqueOperationContext opCtx = client->makeOperationContext();
User* x509User;
ASSERT_NOT_OK(
- authzManager->acquireUser(txn.get(), UserName("CN=mongodb.com", "$external"), &x509User));
+ authzManager->acquireUser(opCtx.get(), UserName("CN=mongodb.com", "$external"), &x509User));
}
/**
@@ -325,15 +325,15 @@ public:
* the mock's user document catalog, without performing any role resolution. This way the tests
* can control exactly what privileges are returned for the user.
*/
- Status getUserDescription(OperationContext* txn,
+ Status getUserDescription(OperationContext* opCtx,
const UserName& userName,
BSONObj* result) override {
- return _getUserDocument(txn, userName, result);
+ return _getUserDocument(opCtx, userName, result);
}
private:
- Status _getUserDocument(OperationContext* txn, const UserName& userName, BSONObj* userDoc) {
- Status status = findOne(txn,
+ Status _getUserDocument(OperationContext* opCtx, const UserName& userName, BSONObj* userDoc) {
+ Status status = findOne(opCtx,
AuthorizationManager::usersCollectionNamespace,
BSON(AuthorizationManager::USER_NAME_FIELD_NAME
<< userName.getUser()
@@ -372,10 +372,10 @@ public:
// Tests SERVER-21535, unrecognized actions should be ignored rather than causing errors.
TEST_F(AuthorizationManagerTest, testAcquireV2UserWithUnrecognizedActions) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
ASSERT_OK(
- externalState->insertPrivilegeDocument(&txn,
+ externalState->insertPrivilegeDocument(&opCtx,
BSON("_id"
<< "admin.myUser"
<< "user"
@@ -403,7 +403,7 @@ TEST_F(AuthorizationManagerTest, testAcquireV2UserWithUnrecognizedActions) {
BSONObj()));
User* myUser;
- ASSERT_OK(authzManager->acquireUser(&txn, UserName("myUser", "test"), &myUser));
+ ASSERT_OK(authzManager->acquireUser(&opCtx, UserName("myUser", "test"), &myUser));
ASSERT_EQUALS(UserName("myUser", "test"), myUser->getName());
ASSERT(myUser->isValid());
ASSERT_EQUALS(1U, myUser->getRefCount());
@@ -444,17 +444,17 @@ public:
};
virtual void setUp() override {
- txn.setRecoveryUnit(recoveryUnit, OperationContext::kNotInUnitOfWork);
+ opCtx.setRecoveryUnit(recoveryUnit, OperationContext::kNotInUnitOfWork);
AuthorizationManagerTest::setUp();
}
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
size_t registeredChanges = 0;
MockRecoveryUnit* recoveryUnit = new MockRecoveryUnit(&registeredChanges);
};
TEST_F(AuthorizationManagerLogOpTest, testDropDatabaseAddsRecoveryUnits) {
- authzManager->logOp(&txn,
+ authzManager->logOp(&opCtx,
"c",
"admin.$cmd",
BSON("dropDatabase"
@@ -464,7 +464,7 @@ TEST_F(AuthorizationManagerLogOpTest, testDropDatabaseAddsRecoveryUnits) {
}
TEST_F(AuthorizationManagerLogOpTest, testDropAuthCollectionAddsRecoveryUnits) {
- authzManager->logOp(&txn,
+ authzManager->logOp(&opCtx,
"c",
"admin.$cmd",
BSON("drop"
@@ -472,7 +472,7 @@ TEST_F(AuthorizationManagerLogOpTest, testDropAuthCollectionAddsRecoveryUnits) {
nullptr);
ASSERT_EQ(size_t(1), registeredChanges);
- authzManager->logOp(&txn,
+ authzManager->logOp(&opCtx,
"c",
"admin.$cmd",
BSON("drop"
@@ -480,7 +480,7 @@ TEST_F(AuthorizationManagerLogOpTest, testDropAuthCollectionAddsRecoveryUnits) {
nullptr);
ASSERT_EQ(size_t(2), registeredChanges);
- authzManager->logOp(&txn,
+ authzManager->logOp(&opCtx,
"c",
"admin.$cmd",
BSON("drop"
@@ -488,7 +488,7 @@ TEST_F(AuthorizationManagerLogOpTest, testDropAuthCollectionAddsRecoveryUnits) {
nullptr);
ASSERT_EQ(size_t(3), registeredChanges);
- authzManager->logOp(&txn,
+ authzManager->logOp(&opCtx,
"c",
"admin.$cmd",
BSON("drop"
@@ -498,21 +498,21 @@ TEST_F(AuthorizationManagerLogOpTest, testDropAuthCollectionAddsRecoveryUnits) {
}
TEST_F(AuthorizationManagerLogOpTest, testCreateAnyCollectionAddsNoRecoveryUnits) {
- authzManager->logOp(&txn,
+ authzManager->logOp(&opCtx,
"c",
"admin.$cmd",
BSON("create"
<< "system.users"),
nullptr);
- authzManager->logOp(&txn,
+ authzManager->logOp(&opCtx,
"c",
"admin.$cmd",
BSON("create"
<< "system.profile"),
nullptr);
- authzManager->logOp(&txn,
+ authzManager->logOp(&opCtx,
"c",
"admin.$cmd",
BSON("create"
@@ -523,7 +523,7 @@ TEST_F(AuthorizationManagerLogOpTest, testCreateAnyCollectionAddsNoRecoveryUnits
}
TEST_F(AuthorizationManagerLogOpTest, testRawInsertToRolesCollectionAddsRecoveryUnits) {
- authzManager->logOp(&txn,
+ authzManager->logOp(&opCtx,
"i",
"admin.system.profile",
BSON("_id"
@@ -531,7 +531,7 @@ TEST_F(AuthorizationManagerLogOpTest, testRawInsertToRolesCollectionAddsRecovery
nullptr);
ASSERT_EQ(size_t(0), registeredChanges);
- authzManager->logOp(&txn,
+ authzManager->logOp(&opCtx,
"i",
"admin.system.users",
BSON("_id"
@@ -539,7 +539,7 @@ TEST_F(AuthorizationManagerLogOpTest, testRawInsertToRolesCollectionAddsRecovery
nullptr);
ASSERT_EQ(size_t(0), registeredChanges);
- authzManager->logOp(&txn,
+ authzManager->logOp(&opCtx,
"i",
"admin.system.roles",
BSON("_id"
diff --git a/src/mongo/db/auth/authorization_session.cpp b/src/mongo/db/auth/authorization_session.cpp
index a91f350e24d..0fdf38884cd 100644
--- a/src/mongo/db/auth/authorization_session.cpp
+++ b/src/mongo/db/auth/authorization_session.cpp
@@ -95,14 +95,15 @@ AuthorizationManager& AuthorizationSession::getAuthorizationManager() {
return _externalState->getAuthorizationManager();
}
-void AuthorizationSession::startRequest(OperationContext* txn) {
- _externalState->startRequest(txn);
- _refreshUserInfoAsNeeded(txn);
+void AuthorizationSession::startRequest(OperationContext* opCtx) {
+ _externalState->startRequest(opCtx);
+ _refreshUserInfoAsNeeded(opCtx);
}
-Status AuthorizationSession::addAndAuthorizeUser(OperationContext* txn, const UserName& userName) {
+Status AuthorizationSession::addAndAuthorizeUser(OperationContext* opCtx,
+ const UserName& userName) {
User* user;
- Status status = getAuthorizationManager().acquireUser(txn, userName, &user);
+ Status status = getAuthorizationManager().acquireUser(opCtx, userName, &user);
if (!status.isOK()) {
return status;
}
@@ -370,7 +371,7 @@ Status AuthorizationSession::checkAuthForGetMore(const NamespaceString& ns,
return Status::OK();
}
-Status AuthorizationSession::checkAuthForInsert(OperationContext* txn,
+Status AuthorizationSession::checkAuthForInsert(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& document) {
if (ns.coll() == "system.indexes"_sd) {
@@ -388,7 +389,7 @@ Status AuthorizationSession::checkAuthForInsert(OperationContext* txn,
}
} else {
ActionSet required{ActionType::insert};
- if (documentValidationDisabled(txn)) {
+ if (documentValidationDisabled(opCtx)) {
required.addAction(ActionType::bypassDocumentValidation);
}
if (!isAuthorizedForActionsOnNamespace(ns, required)) {
@@ -400,7 +401,7 @@ Status AuthorizationSession::checkAuthForInsert(OperationContext* txn,
return Status::OK();
}
-Status AuthorizationSession::checkAuthForUpdate(OperationContext* txn,
+Status AuthorizationSession::checkAuthForUpdate(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& query,
const BSONObj& update,
@@ -413,7 +414,7 @@ Status AuthorizationSession::checkAuthForUpdate(OperationContext* txn,
operationType = "upsert"_sd;
}
- if (documentValidationDisabled(txn)) {
+ if (documentValidationDisabled(opCtx)) {
required.addAction(ActionType::bypassDocumentValidation);
}
@@ -425,7 +426,7 @@ Status AuthorizationSession::checkAuthForUpdate(OperationContext* txn,
return Status::OK();
}
-Status AuthorizationSession::checkAuthForDelete(OperationContext* txn,
+Status AuthorizationSession::checkAuthForDelete(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& query) {
if (!isAuthorizedForActionsOnNamespace(ns, ActionType::remove)) {
@@ -730,7 +731,7 @@ bool AuthorizationSession::isAuthenticatedAsUserWithRole(const RoleName& roleNam
return false;
}
-void AuthorizationSession::_refreshUserInfoAsNeeded(OperationContext* txn) {
+void AuthorizationSession::_refreshUserInfoAsNeeded(OperationContext* opCtx) {
AuthorizationManager& authMan = getAuthorizationManager();
UserSet::iterator it = _authenticatedUsers.begin();
while (it != _authenticatedUsers.end()) {
@@ -742,7 +743,7 @@ void AuthorizationSession::_refreshUserInfoAsNeeded(OperationContext* txn) {
UserName name = user->getName();
User* updatedUser;
- Status status = authMan.acquireUser(txn, name, &updatedUser);
+ Status status = authMan.acquireUser(opCtx, name, &updatedUser);
switch (status.code()) {
case ErrorCodes::OK: {
// Success! Replace the old User object with the updated one.
diff --git a/src/mongo/db/auth/authorization_session.h b/src/mongo/db/auth/authorization_session.h
index 2cd46a8d59f..c2bb3fbfafd 100644
--- a/src/mongo/db/auth/authorization_session.h
+++ b/src/mongo/db/auth/authorization_session.h
@@ -105,13 +105,13 @@ public:
// Should be called at the beginning of every new request. This performs the checks
// necessary to determine if localhost connections should be given full access.
// TODO: try to eliminate the need for this call.
- void startRequest(OperationContext* txn);
+ void startRequest(OperationContext* opCtx);
/**
* Adds the User identified by "UserName" to the authorization session, acquiring privileges
* for it in the process.
*/
- Status addAndAuthorizeUser(OperationContext* txn, const UserName& userName);
+ Status addAndAuthorizeUser(OperationContext* opCtx, const UserName& userName);
// Returns the authenticated user with the given name. Returns NULL
// if no such user is found.
@@ -155,7 +155,7 @@ public:
// Checks if this connection has the privileges necessary to perform the given update on the
// given namespace.
- Status checkAuthForUpdate(OperationContext* txn,
+ Status checkAuthForUpdate(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& query,
const BSONObj& update,
@@ -164,13 +164,13 @@ public:
// Checks if this connection has the privileges necessary to insert the given document
// to the given namespace. Correctly interprets inserts to system.indexes and performs
// the proper auth checks for index building.
- Status checkAuthForInsert(OperationContext* txn,
+ Status checkAuthForInsert(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& document);
// Checks if this connection has the privileges necessary to perform a delete on the given
// namespace.
- Status checkAuthForDelete(OperationContext* txn,
+ Status checkAuthForDelete(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& query);
@@ -292,7 +292,7 @@ protected:
private:
// If any users authenticated on this session are marked as invalid this updates them with
// up-to-date information. May require a read lock on the "admin" db to read the user data.
- void _refreshUserInfoAsNeeded(OperationContext* txn);
+ void _refreshUserInfoAsNeeded(OperationContext* opCtx);
// Checks if this connection is authorized for the given Privilege, ignoring whether or not
diff --git a/src/mongo/db/auth/authorization_session_test.cpp b/src/mongo/db/auth/authorization_session_test.cpp
index 0c242b6c13d..ec82f193947 100644
--- a/src/mongo/db/auth/authorization_session_test.cpp
+++ b/src/mongo/db/auth/authorization_session_test.cpp
@@ -59,7 +59,7 @@ public:
_findsShouldFail = enable;
}
- virtual Status findOne(OperationContext* txn,
+ virtual Status findOne(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
BSONObj* result) {
@@ -67,7 +67,7 @@ public:
return Status(ErrorCodes::UnknownError,
"findOne on admin.system.users set to fail in mock.");
}
- return AuthzManagerExternalStateMock::findOne(txn, collectionName, query, result);
+ return AuthzManagerExternalStateMock::findOne(opCtx, collectionName, query, result);
}
private:
@@ -77,7 +77,7 @@ private:
class AuthorizationSessionTest : public ::mongo::unittest::Test {
public:
FailureCapableAuthzManagerExternalStateMock* managerState;
- OperationContextNoop _txn;
+ OperationContextNoop _opCtx;
AuthzSessionExternalStateMock* sessionState;
std::unique_ptr<AuthorizationManager> authzManager;
std::unique_ptr<AuthorizationSessionForTest> authzSession;
@@ -143,10 +143,10 @@ TEST_F(AuthorizationSessionTest, AddUserAndCheckAuthorization) {
// Check that you can't authorize a user that doesn't exist.
ASSERT_EQUALS(ErrorCodes::UserNotFound,
- authzSession->addAndAuthorizeUser(&_txn, UserName("spencer", "test")));
+ authzSession->addAndAuthorizeUser(&_opCtx, UserName("spencer", "test")));
// Add a user with readWrite and dbAdmin on the test DB
- ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
+ ASSERT_OK(managerState->insertPrivilegeDocument(&_opCtx,
BSON("user"
<< "spencer"
<< "db"
@@ -164,7 +164,7 @@ TEST_F(AuthorizationSessionTest, AddUserAndCheckAuthorization) {
<< "db"
<< "test"))),
BSONObj()));
- ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("spencer", "test")));
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_opCtx, UserName("spencer", "test")));
ASSERT_TRUE(
authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::insert));
@@ -175,7 +175,7 @@ TEST_F(AuthorizationSessionTest, AddUserAndCheckAuthorization) {
// Add an admin user with readWriteAnyDatabase
ASSERT_OK(
- managerState->insertPrivilegeDocument(&_txn,
+ managerState->insertPrivilegeDocument(&_opCtx,
BSON("user"
<< "admin"
<< "db"
@@ -189,7 +189,7 @@ TEST_F(AuthorizationSessionTest, AddUserAndCheckAuthorization) {
<< "db"
<< "admin"))),
BSONObj()));
- ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("admin", "admin")));
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_opCtx, UserName("admin", "admin")));
ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
ResourcePattern::forExactNamespace(NamespaceString("anydb.somecollection")),
@@ -222,7 +222,7 @@ TEST_F(AuthorizationSessionTest, AddUserAndCheckAuthorization) {
TEST_F(AuthorizationSessionTest, DuplicateRolesOK) {
// Add a user with doubled-up readWrite and single dbAdmin on the test DB
- ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
+ ASSERT_OK(managerState->insertPrivilegeDocument(&_opCtx,
BSON("user"
<< "spencer"
<< "db"
@@ -244,7 +244,7 @@ TEST_F(AuthorizationSessionTest, DuplicateRolesOK) {
<< "db"
<< "test"))),
BSONObj()));
- ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("spencer", "test")));
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_opCtx, UserName("spencer", "test")));
ASSERT_TRUE(
authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::insert));
@@ -255,7 +255,7 @@ TEST_F(AuthorizationSessionTest, DuplicateRolesOK) {
}
TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
- ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
+ ASSERT_OK(managerState->insertPrivilegeDocument(&_opCtx,
BSON("user"
<< "rw"
<< "db"
@@ -273,7 +273,7 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
<< "db"
<< "test"))),
BSONObj()));
- ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
+ ASSERT_OK(managerState->insertPrivilegeDocument(&_opCtx,
BSON("user"
<< "useradmin"
<< "db"
@@ -288,7 +288,7 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
<< "test"))),
BSONObj()));
ASSERT_OK(
- managerState->insertPrivilegeDocument(&_txn,
+ managerState->insertPrivilegeDocument(&_opCtx,
BSON("user"
<< "rwany"
<< "db"
@@ -307,7 +307,7 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
<< "admin"))),
BSONObj()));
ASSERT_OK(
- managerState->insertPrivilegeDocument(&_txn,
+ managerState->insertPrivilegeDocument(&_opCtx,
BSON("user"
<< "useradminany"
<< "db"
@@ -322,7 +322,7 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
<< "admin"))),
BSONObj()));
- ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("rwany", "test")));
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_opCtx, UserName("rwany", "test")));
ASSERT_FALSE(
authzSession->isAuthorizedForActionsOnResource(testUsersCollResource, ActionType::insert));
@@ -342,7 +342,7 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
authzSession->isAuthorizedForActionsOnResource(otherProfileCollResource, ActionType::find));
// Logging in as useradminany@test implicitly logs out rwany@test.
- ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("useradminany", "test")));
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_opCtx, UserName("useradminany", "test")));
ASSERT_FALSE(
authzSession->isAuthorizedForActionsOnResource(testUsersCollResource, ActionType::insert));
ASSERT_TRUE(
@@ -361,7 +361,7 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
authzSession->isAuthorizedForActionsOnResource(otherProfileCollResource, ActionType::find));
// Logging in as rw@test implicitly logs out useradminany@test.
- ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("rw", "test")));
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_opCtx, UserName("rw", "test")));
ASSERT_FALSE(
authzSession->isAuthorizedForActionsOnResource(testUsersCollResource, ActionType::insert));
@@ -382,7 +382,7 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
// Logging in as useradmin@test implicitly logs out rw@test.
- ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("useradmin", "test")));
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_opCtx, UserName("useradmin", "test")));
ASSERT_FALSE(
authzSession->isAuthorizedForActionsOnResource(testUsersCollResource, ActionType::insert));
ASSERT_FALSE(
@@ -403,7 +403,7 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
TEST_F(AuthorizationSessionTest, InvalidateUser) {
// Add a readWrite user
- ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
+ ASSERT_OK(managerState->insertPrivilegeDocument(&_opCtx,
BSON("user"
<< "spencer"
<< "db"
@@ -417,7 +417,7 @@ TEST_F(AuthorizationSessionTest, InvalidateUser) {
<< "db"
<< "test"))),
BSONObj()));
- ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("spencer", "test")));
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_opCtx, UserName("spencer", "test")));
ASSERT_TRUE(
authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::find));
@@ -430,8 +430,8 @@ TEST_F(AuthorizationSessionTest, InvalidateUser) {
// Change the user to be read-only
int ignored;
managerState->remove(
- &_txn, AuthorizationManager::usersCollectionNamespace, BSONObj(), BSONObj(), &ignored);
- ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
+ &_opCtx, AuthorizationManager::usersCollectionNamespace, BSONObj(), BSONObj(), &ignored);
+ ASSERT_OK(managerState->insertPrivilegeDocument(&_opCtx,
BSON("user"
<< "spencer"
<< "db"
@@ -448,7 +448,7 @@ TEST_F(AuthorizationSessionTest, InvalidateUser) {
// Make sure that invalidating the user causes the session to reload its privileges.
authzManager->invalidateUserByName(user->getName());
- authzSession->startRequest(&_txn); // Refreshes cached data for invalid users
+ authzSession->startRequest(&_opCtx); // Refreshes cached data for invalid users
ASSERT_TRUE(
authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::find));
ASSERT_FALSE(
@@ -459,10 +459,10 @@ TEST_F(AuthorizationSessionTest, InvalidateUser) {
// Delete the user.
managerState->remove(
- &_txn, AuthorizationManager::usersCollectionNamespace, BSONObj(), BSONObj(), &ignored);
+ &_opCtx, AuthorizationManager::usersCollectionNamespace, BSONObj(), BSONObj(), &ignored);
// Make sure that invalidating the user causes the session to reload its privileges.
authzManager->invalidateUserByName(user->getName());
- authzSession->startRequest(&_txn); // Refreshes cached data for invalid users
+ authzSession->startRequest(&_opCtx); // Refreshes cached data for invalid users
ASSERT_FALSE(
authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::find));
ASSERT_FALSE(
@@ -472,7 +472,7 @@ TEST_F(AuthorizationSessionTest, InvalidateUser) {
TEST_F(AuthorizationSessionTest, UseOldUserInfoInFaceOfConnectivityProblems) {
// Add a readWrite user
- ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
+ ASSERT_OK(managerState->insertPrivilegeDocument(&_opCtx,
BSON("user"
<< "spencer"
<< "db"
@@ -486,7 +486,7 @@ TEST_F(AuthorizationSessionTest, UseOldUserInfoInFaceOfConnectivityProblems) {
<< "db"
<< "test"))),
BSONObj()));
- ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("spencer", "test")));
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_opCtx, UserName("spencer", "test")));
ASSERT_TRUE(
authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::find));
@@ -500,8 +500,8 @@ TEST_F(AuthorizationSessionTest, UseOldUserInfoInFaceOfConnectivityProblems) {
int ignored;
managerState->setFindsShouldFail(true);
managerState->remove(
- &_txn, AuthorizationManager::usersCollectionNamespace, BSONObj(), BSONObj(), &ignored);
- ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
+ &_opCtx, AuthorizationManager::usersCollectionNamespace, BSONObj(), BSONObj(), &ignored);
+ ASSERT_OK(managerState->insertPrivilegeDocument(&_opCtx,
BSON("user"
<< "spencer"
<< "db"
@@ -520,7 +520,7 @@ TEST_F(AuthorizationSessionTest, UseOldUserInfoInFaceOfConnectivityProblems) {
// document lookup to fail, the authz session should continue to use its known out-of-date
// privilege data.
authzManager->invalidateUserByName(user->getName());
- authzSession->startRequest(&_txn); // Refreshes cached data for invalid users
+ authzSession->startRequest(&_opCtx); // Refreshes cached data for invalid users
ASSERT_TRUE(
authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::find));
ASSERT_TRUE(
@@ -529,7 +529,7 @@ TEST_F(AuthorizationSessionTest, UseOldUserInfoInFaceOfConnectivityProblems) {
// Once we configure document lookup to succeed again, authorization checks should
// observe the new values.
managerState->setFindsShouldFail(false);
- authzSession->startRequest(&_txn); // Refreshes cached data for invalid users
+ authzSession->startRequest(&_opCtx); // Refreshes cached data for invalid users
ASSERT_TRUE(
authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::find));
ASSERT_FALSE(
diff --git a/src/mongo/db/auth/authz_manager_external_state.cpp b/src/mongo/db/auth/authz_manager_external_state.cpp
index c81c6d1324f..f0a30f51a9d 100644
--- a/src/mongo/db/auth/authz_manager_external_state.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state.cpp
@@ -40,11 +40,11 @@ stdx::function<std::unique_ptr<AuthzManagerExternalState>()> AuthzManagerExterna
AuthzManagerExternalState::AuthzManagerExternalState() = default;
AuthzManagerExternalState::~AuthzManagerExternalState() = default;
-bool AuthzManagerExternalState::shouldUseRolesFromConnection(OperationContext* txn,
+bool AuthzManagerExternalState::shouldUseRolesFromConnection(OperationContext* opCtx,
const UserName& userName) {
- if (!txn || !txn->getClient() || !txn->getClient()->session())
+ if (!opCtx || !opCtx->getClient() || !opCtx->getClient()->session())
return false;
- auto& sslPeerInfo = SSLPeerInfo::forSession(txn->getClient()->session());
+ auto& sslPeerInfo = SSLPeerInfo::forSession(opCtx->getClient()->session());
return sslPeerInfo.subjectName == userName.getUser() && userName.getDB() == "$external" &&
!sslPeerInfo.roles.empty();
}
diff --git a/src/mongo/db/auth/authz_manager_external_state.h b/src/mongo/db/auth/authz_manager_external_state.h
index f3d2cc721c6..18277c272b2 100644
--- a/src/mongo/db/auth/authz_manager_external_state.h
+++ b/src/mongo/db/auth/authz_manager_external_state.h
@@ -65,7 +65,7 @@ public:
* calling other methods. Object may not be used after this method returns something other
* than Status::OK().
*/
- virtual Status initialize(OperationContext* txn) = 0;
+ virtual Status initialize(OperationContext* opCtx) = 0;
/**
* Creates an external state manipulator for an AuthorizationSession whose
@@ -78,7 +78,7 @@ public:
* Retrieves the schema version of the persistent data describing users and roles.
* Will leave *outVersion unmodified on non-OK status return values.
*/
- virtual Status getStoredAuthorizationVersion(OperationContext* txn, int* outVersion) = 0;
+ virtual Status getStoredAuthorizationVersion(OperationContext* opCtx, int* outVersion) = 0;
/**
* Writes into "result" a document describing the named user and returns Status::OK(). The
@@ -91,7 +91,7 @@ public:
*
* If the user does not exist, returns ErrorCodes::UserNotFound.
*/
- virtual Status getUserDescription(OperationContext* txn,
+ virtual Status getUserDescription(OperationContext* opCtx,
const UserName& userName,
BSONObj* result) = 0;
@@ -109,7 +109,7 @@ public:
*
* If the role does not exist, returns ErrorCodes::RoleNotFound.
*/
- virtual Status getRoleDescription(OperationContext* txn,
+ virtual Status getRoleDescription(OperationContext* opCtx,
const RoleName& roleName,
PrivilegeFormat showPrivileges,
BSONObj* result) = 0;
@@ -127,7 +127,7 @@ public:
* inconsistencies.
*/
- virtual Status getRolesDescription(OperationContext* txn,
+ virtual Status getRolesDescription(OperationContext* opCtx,
const std::vector<RoleName>& roles,
PrivilegeFormat showPrivileges,
BSONObj* result) = 0;
@@ -144,7 +144,7 @@ public:
* some of the information in a given role description is inconsistent, the document will
* contain a "warnings" array, with std::string messages describing inconsistencies.
*/
- virtual Status getRoleDescriptionsForDB(OperationContext* txn,
+ virtual Status getRoleDescriptionsForDB(OperationContext* opCtx,
const std::string dbname,
PrivilegeFormat showPrivileges,
bool showBuiltinRoles,
@@ -153,9 +153,9 @@ public:
/**
* Returns true if there exists at least one privilege document in the system.
*/
- virtual bool hasAnyPrivilegeDocuments(OperationContext* txn) = 0;
+ virtual bool hasAnyPrivilegeDocuments(OperationContext* opCtx) = 0;
- virtual void logOp(OperationContext* txn,
+ virtual void logOp(OperationContext* opCtx,
const char* op,
const char* ns,
const BSONObj& o,
@@ -169,7 +169,7 @@ protected:
* Returns true if roles for this user were provided by the client, and can be obtained from
* the connection.
*/
- bool shouldUseRolesFromConnection(OperationContext* txn, const UserName& username);
+ bool shouldUseRolesFromConnection(OperationContext* opCtx, const UserName& username);
};
} // namespace mongo
diff --git a/src/mongo/db/auth/authz_manager_external_state_d.cpp b/src/mongo/db/auth/authz_manager_external_state_d.cpp
index bd24c6c5b19..6d497417ac0 100644
--- a/src/mongo/db/auth/authz_manager_external_state_d.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_d.cpp
@@ -60,13 +60,13 @@ AuthzManagerExternalStateMongod::makeAuthzSessionExternalState(AuthorizationMana
}
Status AuthzManagerExternalStateMongod::query(
- OperationContext* txn,
+ OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
const BSONObj& projection,
const stdx::function<void(const BSONObj&)>& resultProcessor) {
try {
- DBDirectClient client(txn);
+ DBDirectClient client(opCtx);
client.query(resultProcessor, collectionName.ns(), query, &projection);
return Status::OK();
} catch (const DBException& e) {
@@ -74,14 +74,14 @@ Status AuthzManagerExternalStateMongod::query(
}
}
-Status AuthzManagerExternalStateMongod::findOne(OperationContext* txn,
+Status AuthzManagerExternalStateMongod::findOne(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
BSONObj* result) {
- AutoGetCollectionForRead ctx(txn, collectionName);
+ AutoGetCollectionForRead ctx(opCtx, collectionName);
BSONObj found;
- if (Helpers::findOne(txn, ctx.getCollection(), query, found)) {
+ if (Helpers::findOne(opCtx, ctx.getCollection(), query, found)) {
*result = found.getOwned();
return Status::OK();
}
diff --git a/src/mongo/db/auth/authz_manager_external_state_d.h b/src/mongo/db/auth/authz_manager_external_state_d.h
index f0fb7f91568..a620528e46f 100644
--- a/src/mongo/db/auth/authz_manager_external_state_d.h
+++ b/src/mongo/db/auth/authz_manager_external_state_d.h
@@ -52,11 +52,11 @@ public:
std::unique_ptr<AuthzSessionExternalState> makeAuthzSessionExternalState(
AuthorizationManager* authzManager) override;
- virtual Status findOne(OperationContext* txn,
+ virtual Status findOne(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
BSONObj* result);
- virtual Status query(OperationContext* txn,
+ virtual Status query(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
const BSONObj& projection,
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.cpp b/src/mongo/db/auth/authz_manager_external_state_local.cpp
index 30bed6ee33f..de706dcd9da 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_local.cpp
@@ -44,8 +44,8 @@ namespace mongo {
using std::vector;
-Status AuthzManagerExternalStateLocal::initialize(OperationContext* txn) {
- Status status = _initializeRoleGraph(txn);
+Status AuthzManagerExternalStateLocal::initialize(OperationContext* opCtx) {
+ Status status = _initializeRoleGraph(opCtx);
if (!status.isOK()) {
if (status == ErrorCodes::GraphContainsCycle) {
error() << "Cycle detected in admin.system.roles; role inheritance disabled. "
@@ -61,10 +61,10 @@ Status AuthzManagerExternalStateLocal::initialize(OperationContext* txn) {
return Status::OK();
}
-Status AuthzManagerExternalStateLocal::getStoredAuthorizationVersion(OperationContext* txn,
+Status AuthzManagerExternalStateLocal::getStoredAuthorizationVersion(OperationContext* opCtx,
int* outVersion) {
BSONObj versionDoc;
- Status status = findOne(txn,
+ Status status = findOne(opCtx,
AuthorizationManager::versionCollectionNamespace,
AuthorizationManager::versionDocumentQuery,
&versionDoc);
@@ -134,10 +134,10 @@ void addPrivilegeObjectsOrWarningsToArrayElement(mutablebson::Element privileges
}
} // namespace
-bool AuthzManagerExternalStateLocal::hasAnyPrivilegeDocuments(OperationContext* txn) {
+bool AuthzManagerExternalStateLocal::hasAnyPrivilegeDocuments(OperationContext* opCtx) {
BSONObj userBSONObj;
Status statusFindUsers =
- findOne(txn, AuthorizationManager::usersCollectionNamespace, BSONObj(), &userBSONObj);
+ findOne(opCtx, AuthorizationManager::usersCollectionNamespace, BSONObj(), &userBSONObj);
// If we were unable to complete the query,
// it's best to assume that there _are_ privilege documents.
@@ -145,23 +145,23 @@ bool AuthzManagerExternalStateLocal::hasAnyPrivilegeDocuments(OperationContext*
return true;
}
Status statusFindRoles =
- findOne(txn, AuthorizationManager::rolesCollectionNamespace, BSONObj(), &userBSONObj);
+ findOne(opCtx, AuthorizationManager::rolesCollectionNamespace, BSONObj(), &userBSONObj);
return statusFindRoles != ErrorCodes::NoMatchingDocument;
}
-Status AuthzManagerExternalStateLocal::getUserDescription(OperationContext* txn,
+Status AuthzManagerExternalStateLocal::getUserDescription(OperationContext* opCtx,
const UserName& userName,
BSONObj* result) {
Status status = Status::OK();
- if (!shouldUseRolesFromConnection(txn, userName)) {
- status = _getUserDocument(txn, userName, result);
+ if (!shouldUseRolesFromConnection(opCtx, userName)) {
+ status = _getUserDocument(opCtx, userName, result);
if (!status.isOK())
return status;
} else {
// We are able to artifically construct the external user from the request
BSONArrayBuilder userRoles;
- auto& sslPeerInfo = SSLPeerInfo::forSession(txn->getClient()->session());
+ auto& sslPeerInfo = SSLPeerInfo::forSession(opCtx->getClient()->session());
for (const RoleName& role : sslPeerInfo.roles) {
userRoles << BSON("role" << role.getRole() << "db" << role.getDB());
}
@@ -238,10 +238,10 @@ void AuthzManagerExternalStateLocal::resolveUserRoles(mutablebson::Document* use
}
}
-Status AuthzManagerExternalStateLocal::_getUserDocument(OperationContext* txn,
+Status AuthzManagerExternalStateLocal::_getUserDocument(OperationContext* opCtx,
const UserName& userName,
BSONObj* userDoc) {
- Status status = findOne(txn,
+ Status status = findOne(opCtx,
AuthorizationManager::usersCollectionNamespace,
BSON(AuthorizationManager::USER_NAME_FIELD_NAME
<< userName.getUser()
@@ -256,7 +256,7 @@ Status AuthzManagerExternalStateLocal::_getUserDocument(OperationContext* txn,
return status;
}
-Status AuthzManagerExternalStateLocal::getRoleDescription(OperationContext* txn,
+Status AuthzManagerExternalStateLocal::getRoleDescription(OperationContext* opCtx,
const RoleName& roleName,
PrivilegeFormat showPrivileges,
BSONObj* result) {
@@ -274,7 +274,7 @@ Status AuthzManagerExternalStateLocal::getRoleDescription(OperationContext* txn,
return _getRoleDescription_inlock(roleName, showPrivileges, result);
}
-Status AuthzManagerExternalStateLocal::getRolesDescription(OperationContext* txn,
+Status AuthzManagerExternalStateLocal::getRolesDescription(OperationContext* opCtx,
const std::vector<RoleName>& roles,
PrivilegeFormat showPrivileges,
BSONObj* result) {
@@ -357,7 +357,7 @@ Status AuthzManagerExternalStateLocal::_getRoleDescription_inlock(const RoleName
return Status::OK();
}
-Status AuthzManagerExternalStateLocal::getRoleDescriptionsForDB(OperationContext* txn,
+Status AuthzManagerExternalStateLocal::getRoleDescriptionsForDB(OperationContext* opCtx,
const std::string dbname,
PrivilegeFormat showPrivileges,
bool showBuiltinRoles,
@@ -400,7 +400,7 @@ void addRoleFromDocumentOrWarn(RoleGraph* roleGraph, const BSONObj& doc) {
} // namespace
-Status AuthzManagerExternalStateLocal::_initializeRoleGraph(OperationContext* txn) {
+Status AuthzManagerExternalStateLocal::_initializeRoleGraph(OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> lkInitialzeRoleGraph(_roleGraphMutex);
_roleGraphState = roleGraphStateInitial;
@@ -408,7 +408,7 @@ Status AuthzManagerExternalStateLocal::_initializeRoleGraph(OperationContext* tx
RoleGraph newRoleGraph;
Status status =
- query(txn,
+ query(opCtx,
AuthorizationManager::rolesCollectionNamespace,
BSONObj(),
BSONObj(),
@@ -440,15 +440,15 @@ Status AuthzManagerExternalStateLocal::_initializeRoleGraph(OperationContext* tx
class AuthzManagerExternalStateLocal::AuthzManagerLogOpHandler : public RecoveryUnit::Change {
public:
- // None of the parameters below (except txn and externalState) need to live longer than the
+ // None of the parameters below (except opCtx and externalState) need to live longer than the
// instantiations of this class
- AuthzManagerLogOpHandler(OperationContext* txn,
+ AuthzManagerLogOpHandler(OperationContext* opCtx,
AuthzManagerExternalStateLocal* externalState,
const char* op,
const char* ns,
const BSONObj& o,
const BSONObj* o2)
- : _txn(txn),
+ : _opCtx(opCtx),
_externalState(externalState),
_op(op),
_ns(ns),
@@ -460,7 +460,7 @@ public:
virtual void commit() {
stdx::lock_guard<stdx::mutex> lk(_externalState->_roleGraphMutex);
Status status = _externalState->_roleGraph.handleLogOp(
- _txn, _op.c_str(), NamespaceString(_ns.c_str()), _o, _isO2Set ? &_o2 : NULL);
+ _opCtx, _op.c_str(), NamespaceString(_ns.c_str()), _o, _isO2Set ? &_o2 : NULL);
if (status == ErrorCodes::OplogOperationUnsupported) {
_externalState->_roleGraph = RoleGraph();
@@ -491,7 +491,7 @@ public:
virtual void rollback() {}
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
AuthzManagerExternalStateLocal* _externalState;
const std::string _op;
const std::string _ns;
@@ -502,10 +502,11 @@ private:
};
void AuthzManagerExternalStateLocal::logOp(
- OperationContext* txn, const char* op, const char* ns, const BSONObj& o, const BSONObj* o2) {
+ OperationContext* opCtx, const char* op, const char* ns, const BSONObj& o, const BSONObj* o2) {
if (ns == AuthorizationManager::rolesCollectionNamespace.ns() ||
ns == AuthorizationManager::adminCommandNamespace.ns()) {
- txn->recoveryUnit()->registerChange(new AuthzManagerLogOpHandler(txn, this, op, ns, o, o2));
+ opCtx->recoveryUnit()->registerChange(
+ new AuthzManagerLogOpHandler(opCtx, this, op, ns, o, o2));
}
}
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.h b/src/mongo/db/auth/authz_manager_external_state_local.h
index e1e2cbdc1e2..ccaa7e2d452 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.h
+++ b/src/mongo/db/auth/authz_manager_external_state_local.h
@@ -55,27 +55,27 @@ class AuthzManagerExternalStateLocal : public AuthzManagerExternalState {
public:
virtual ~AuthzManagerExternalStateLocal() = default;
- virtual Status initialize(OperationContext* txn);
+ virtual Status initialize(OperationContext* opCtx);
- virtual Status getStoredAuthorizationVersion(OperationContext* txn, int* outVersion);
- virtual Status getUserDescription(OperationContext* txn,
+ virtual Status getStoredAuthorizationVersion(OperationContext* opCtx, int* outVersion);
+ virtual Status getUserDescription(OperationContext* opCtx,
const UserName& userName,
BSONObj* result);
- virtual Status getRoleDescription(OperationContext* txn,
+ virtual Status getRoleDescription(OperationContext* opCtx,
const RoleName& roleName,
PrivilegeFormat showPrivileges,
BSONObj* result);
- virtual Status getRolesDescription(OperationContext* txn,
+ virtual Status getRolesDescription(OperationContext* opCtx,
const std::vector<RoleName>& roles,
PrivilegeFormat showPrivileges,
BSONObj* result);
- virtual Status getRoleDescriptionsForDB(OperationContext* txn,
+ virtual Status getRoleDescriptionsForDB(OperationContext* opCtx,
const std::string dbname,
PrivilegeFormat showPrivileges,
bool showBuiltinRoles,
std::vector<BSONObj>* result);
- bool hasAnyPrivilegeDocuments(OperationContext* txn) override;
+ bool hasAnyPrivilegeDocuments(OperationContext* opCtx) override;
/**
* Finds a document matching "query" in "collectionName", and store a shared-ownership
@@ -84,7 +84,7 @@ public:
* Returns Status::OK() on success. If no match is found, returns
* ErrorCodes::NoMatchingDocument. Other errors returned as appropriate.
*/
- virtual Status findOne(OperationContext* txn,
+ virtual Status findOne(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
BSONObj* result) = 0;
@@ -93,14 +93,17 @@ public:
* Finds all documents matching "query" in "collectionName". For each document returned,
* calls the function resultProcessor on it.
*/
- virtual Status query(OperationContext* txn,
+ virtual Status query(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
const BSONObj& projection,
const stdx::function<void(const BSONObj&)>& resultProcessor) = 0;
- virtual void logOp(
- OperationContext* txn, const char* op, const char* ns, const BSONObj& o, const BSONObj* o2);
+ virtual void logOp(OperationContext* opCtx,
+ const char* op,
+ const char* ns,
+ const BSONObj& o,
+ const BSONObj* o2);
/**
* Takes a user document, and processes it with the RoleGraph, in order to recursively
@@ -127,12 +130,12 @@ private:
/**
* Initializes the role graph from the contents of the admin.system.roles collection.
*/
- Status _initializeRoleGraph(OperationContext* txn);
+ Status _initializeRoleGraph(OperationContext* opCtx);
/**
* Fetches the user document for "userName" from local storage, and stores it into "result".
*/
- Status _getUserDocument(OperationContext* txn, const UserName& userName, BSONObj* result);
+ Status _getUserDocument(OperationContext* opCtx, const UserName& userName, BSONObj* result);
Status _getRoleDescription_inlock(const RoleName& roleName,
PrivilegeFormat showPrivileges,
diff --git a/src/mongo/db/auth/authz_manager_external_state_mock.cpp b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
index 6c2fe3f9398..ff58770d230 100644
--- a/src/mongo/db/auth/authz_manager_external_state_mock.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
@@ -107,7 +107,7 @@ AuthzManagerExternalStateMock::makeAuthzSessionExternalState(AuthorizationManage
return stdx::make_unique<AuthzSessionExternalStateMock>(authzManager);
}
-Status AuthzManagerExternalStateMock::findOne(OperationContext* txn,
+Status AuthzManagerExternalStateMock::findOne(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
BSONObj* result) {
@@ -120,7 +120,7 @@ Status AuthzManagerExternalStateMock::findOne(OperationContext* txn,
}
Status AuthzManagerExternalStateMock::query(
- OperationContext* txn,
+ OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
const BSONObj&,
@@ -142,7 +142,7 @@ Status AuthzManagerExternalStateMock::query(
return status;
}
-Status AuthzManagerExternalStateMock::insert(OperationContext* txn,
+Status AuthzManagerExternalStateMock::insert(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& document,
const BSONObj&) {
@@ -158,19 +158,19 @@ Status AuthzManagerExternalStateMock::insert(OperationContext* txn,
_documents[collectionName].push_back(toInsert);
if (_authzManager) {
- _authzManager->logOp(txn, "i", collectionName.ns().c_str(), toInsert, NULL);
+ _authzManager->logOp(opCtx, "i", collectionName.ns().c_str(), toInsert, NULL);
}
return Status::OK();
}
-Status AuthzManagerExternalStateMock::insertPrivilegeDocument(OperationContext* txn,
+Status AuthzManagerExternalStateMock::insertPrivilegeDocument(OperationContext* opCtx,
const BSONObj& userObj,
const BSONObj& writeConcern) {
- return insert(txn, AuthorizationManager::usersCollectionNamespace, userObj, writeConcern);
+ return insert(opCtx, AuthorizationManager::usersCollectionNamespace, userObj, writeConcern);
}
-Status AuthzManagerExternalStateMock::updateOne(OperationContext* txn,
+Status AuthzManagerExternalStateMock::updateOne(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
const BSONObj& updatePattern,
@@ -197,7 +197,7 @@ Status AuthzManagerExternalStateMock::updateOne(OperationContext* txn,
BSONObj idQuery = driver.makeOplogEntryQuery(newObj, false);
if (_authzManager) {
- _authzManager->logOp(txn, "u", collectionName.ns().c_str(), logObj, &idQuery);
+ _authzManager->logOp(opCtx, "u", collectionName.ns().c_str(), logObj, &idQuery);
}
return Status::OK();
@@ -205,7 +205,7 @@ Status AuthzManagerExternalStateMock::updateOne(OperationContext* txn,
if (query.hasField("_id")) {
document.root().appendElement(query["_id"]);
}
- status = driver.populateDocumentWithQueryFields(txn, query, NULL, document);
+ status = driver.populateDocumentWithQueryFields(opCtx, query, NULL, document);
if (!status.isOK()) {
return status;
}
@@ -213,13 +213,13 @@ Status AuthzManagerExternalStateMock::updateOne(OperationContext* txn,
if (!status.isOK()) {
return status;
}
- return insert(txn, collectionName, document.getObject(), writeConcern);
+ return insert(opCtx, collectionName, document.getObject(), writeConcern);
} else {
return status;
}
}
-Status AuthzManagerExternalStateMock::update(OperationContext* txn,
+Status AuthzManagerExternalStateMock::update(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
const BSONObj& updatePattern,
@@ -231,7 +231,7 @@ Status AuthzManagerExternalStateMock::update(OperationContext* txn,
"AuthzManagerExternalStateMock::update not implemented in mock.");
}
-Status AuthzManagerExternalStateMock::remove(OperationContext* txn,
+Status AuthzManagerExternalStateMock::remove(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
const BSONObj&,
@@ -244,7 +244,7 @@ Status AuthzManagerExternalStateMock::remove(OperationContext* txn,
++n;
if (_authzManager) {
- _authzManager->logOp(txn, "d", collectionName.ns().c_str(), idQuery, NULL);
+ _authzManager->logOp(opCtx, "d", collectionName.ns().c_str(), idQuery, NULL);
}
}
*numRemoved = n;
diff --git a/src/mongo/db/auth/authz_manager_external_state_mock.h b/src/mongo/db/auth/authz_manager_external_state_mock.h
index 0b8fa3e0b3c..4d90099f120 100644
--- a/src/mongo/db/auth/authz_manager_external_state_mock.h
+++ b/src/mongo/db/auth/authz_manager_external_state_mock.h
@@ -60,12 +60,12 @@ public:
std::unique_ptr<AuthzSessionExternalState> makeAuthzSessionExternalState(
AuthorizationManager* authzManager) override;
- virtual Status findOne(OperationContext* txn,
+ virtual Status findOne(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
BSONObj* result);
- virtual Status query(OperationContext* txn,
+ virtual Status query(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
const BSONObj& projection, // Currently unused in mock
@@ -74,25 +74,25 @@ public:
/**
* Inserts the given user object into the "admin" database.
*/
- Status insertPrivilegeDocument(OperationContext* txn,
+ Status insertPrivilegeDocument(OperationContext* opCtx,
const BSONObj& userObj,
const BSONObj& writeConcern);
// This implementation does not understand uniqueness constraints.
- virtual Status insert(OperationContext* txn,
+ virtual Status insert(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& document,
const BSONObj& writeConcern);
// This implementation does not understand uniqueness constraints, ignores writeConcern,
// and only correctly handles some upsert behaviors.
- virtual Status updateOne(OperationContext* txn,
+ virtual Status updateOne(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
const BSONObj& updatePattern,
bool upsert,
const BSONObj& writeConcern);
- virtual Status update(OperationContext* txn,
+ virtual Status update(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
const BSONObj& updatePattern,
@@ -100,7 +100,7 @@ public:
bool multi,
const BSONObj& writeConcern,
int* nMatched);
- virtual Status remove(OperationContext* txn,
+ virtual Status remove(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
const BSONObj& writeConcern,
diff --git a/src/mongo/db/auth/authz_manager_external_state_s.cpp b/src/mongo/db/auth/authz_manager_external_state_s.cpp
index 68a68a7b009..b999cc002ef 100644
--- a/src/mongo/db/auth/authz_manager_external_state_s.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_s.cpp
@@ -83,7 +83,7 @@ AuthzManagerExternalStateMongos::AuthzManagerExternalStateMongos() = default;
AuthzManagerExternalStateMongos::~AuthzManagerExternalStateMongos() = default;
-Status AuthzManagerExternalStateMongos::initialize(OperationContext* txn) {
+Status AuthzManagerExternalStateMongos::initialize(OperationContext* opCtx) {
return Status::OK();
}
@@ -92,7 +92,7 @@ AuthzManagerExternalStateMongos::makeAuthzSessionExternalState(AuthorizationMana
return stdx::make_unique<AuthzSessionExternalStateMongos>(authzManager);
}
-Status AuthzManagerExternalStateMongos::getStoredAuthorizationVersion(OperationContext* txn,
+Status AuthzManagerExternalStateMongos::getStoredAuthorizationVersion(OperationContext* opCtx,
int* outVersion) {
// Note: we are treating
// { 'getParameter' : 1, <authSchemaVersionServerParameter> : 1 }
@@ -100,8 +100,8 @@ Status AuthzManagerExternalStateMongos::getStoredAuthorizationVersion(OperationC
// that runs this command
BSONObj getParameterCmd = BSON("getParameter" << 1 << authSchemaVersionServerParameter << 1);
BSONObjBuilder builder;
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementReadCommand(
- txn, "admin", getParameterCmd, &builder);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementReadCommand(
+ opCtx, "admin", getParameterCmd, &builder);
BSONObj cmdResult = builder.obj();
if (!ok) {
return getStatusFromCommandResult(cmdResult);
@@ -116,10 +116,10 @@ Status AuthzManagerExternalStateMongos::getStoredAuthorizationVersion(OperationC
return Status::OK();
}
-Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* txn,
+Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* opCtx,
const UserName& userName,
BSONObj* result) {
- if (!shouldUseRolesFromConnection(txn, userName)) {
+ if (!shouldUseRolesFromConnection(opCtx, userName)) {
BSONObj usersInfoCmd =
BSON("usersInfo" << BSON_ARRAY(BSON(AuthorizationManager::USER_NAME_FIELD_NAME
<< userName.getUser()
@@ -130,8 +130,8 @@ Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* txn
<< "showCredentials"
<< true);
BSONObjBuilder builder;
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementReadCommand(
- txn, "admin", usersInfoCmd, &builder);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementReadCommand(
+ opCtx, "admin", usersInfoCmd, &builder);
BSONObj cmdResult = builder.obj();
if (!ok) {
return getStatusFromCommandResult(cmdResult);
@@ -156,7 +156,7 @@ Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* txn
// Obtain privilege information from the config servers for all roles acquired from the X509
// certificate.
BSONArrayBuilder userRolesBuilder;
- auto& sslPeerInfo = SSLPeerInfo::forSession(txn->getClient()->session());
+ auto& sslPeerInfo = SSLPeerInfo::forSession(opCtx->getClient()->session());
for (const RoleName& role : sslPeerInfo.roles) {
userRolesBuilder.append(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
<< role.getRole()
@@ -169,8 +169,8 @@ Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* txn
<< "asUserFragment");
BSONObjBuilder cmdResultBuilder;
- const bool cmdOk = Grid::get(txn)->catalogClient(txn)->runUserManagementReadCommand(
- txn, "admin", rolesInfoCmd, &cmdResultBuilder);
+ const bool cmdOk = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementReadCommand(
+ opCtx, "admin", rolesInfoCmd, &cmdResultBuilder);
BSONObj cmdResult = cmdResultBuilder.obj();
if (!cmdOk || !cmdResult["userFragment"].ok()) {
return Status(ErrorCodes::FailedToParse,
@@ -204,7 +204,7 @@ Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* txn
}
}
-Status AuthzManagerExternalStateMongos::getRoleDescription(OperationContext* txn,
+Status AuthzManagerExternalStateMongos::getRoleDescription(OperationContext* opCtx,
const RoleName& roleName,
PrivilegeFormat showPrivileges,
BSONObj* result) {
@@ -217,8 +217,8 @@ Status AuthzManagerExternalStateMongos::getRoleDescription(OperationContext* txn
addShowPrivilegesToBuilder(&rolesInfoCmd, showPrivileges);
BSONObjBuilder builder;
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementReadCommand(
- txn, "admin", rolesInfoCmd.obj(), &builder);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementReadCommand(
+ opCtx, "admin", rolesInfoCmd.obj(), &builder);
BSONObj cmdResult = builder.obj();
if (!ok) {
return getStatusFromCommandResult(cmdResult);
@@ -239,7 +239,7 @@ Status AuthzManagerExternalStateMongos::getRoleDescription(OperationContext* txn
*result = foundRoles[0].Obj().getOwned();
return Status::OK();
}
-Status AuthzManagerExternalStateMongos::getRolesDescription(OperationContext* txn,
+Status AuthzManagerExternalStateMongos::getRolesDescription(OperationContext* opCtx,
const std::vector<RoleName>& roles,
PrivilegeFormat showPrivileges,
BSONObj* result) {
@@ -257,8 +257,8 @@ Status AuthzManagerExternalStateMongos::getRolesDescription(OperationContext* tx
addShowPrivilegesToBuilder(&rolesInfoCmd, showPrivileges);
BSONObjBuilder builder;
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementReadCommand(
- txn, "admin", rolesInfoCmd.obj(), &builder);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementReadCommand(
+ opCtx, "admin", rolesInfoCmd.obj(), &builder);
BSONObj cmdResult = builder.obj();
if (!ok) {
return getStatusFromCommandResult(cmdResult);
@@ -273,7 +273,7 @@ Status AuthzManagerExternalStateMongos::getRolesDescription(OperationContext* tx
return Status::OK();
}
-Status AuthzManagerExternalStateMongos::getRoleDescriptionsForDB(OperationContext* txn,
+Status AuthzManagerExternalStateMongos::getRoleDescriptionsForDB(OperationContext* opCtx,
const std::string dbname,
PrivilegeFormat showPrivileges,
bool showBuiltinRoles,
@@ -283,8 +283,8 @@ Status AuthzManagerExternalStateMongos::getRoleDescriptionsForDB(OperationContex
addShowPrivilegesToBuilder(&rolesInfoCmd, showPrivileges);
BSONObjBuilder builder;
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementReadCommand(
- txn, dbname, rolesInfoCmd.obj(), &builder);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementReadCommand(
+ opCtx, dbname, rolesInfoCmd.obj(), &builder);
BSONObj cmdResult = builder.obj();
if (!ok) {
return getStatusFromCommandResult(cmdResult);
@@ -297,11 +297,11 @@ Status AuthzManagerExternalStateMongos::getRoleDescriptionsForDB(OperationContex
return Status::OK();
}
-bool AuthzManagerExternalStateMongos::hasAnyPrivilegeDocuments(OperationContext* txn) {
+bool AuthzManagerExternalStateMongos::hasAnyPrivilegeDocuments(OperationContext* opCtx) {
BSONObj usersInfoCmd = BSON("usersInfo" << 1);
BSONObjBuilder userBuilder;
- bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementReadCommand(
- txn, "admin", usersInfoCmd, &userBuilder);
+ bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementReadCommand(
+ opCtx, "admin", usersInfoCmd, &userBuilder);
if (!ok) {
// If we were unable to complete the query,
// it's best to assume that there _are_ privilege documents. This might happen
@@ -318,8 +318,8 @@ bool AuthzManagerExternalStateMongos::hasAnyPrivilegeDocuments(OperationContext*
BSONObj rolesInfoCmd = BSON("rolesInfo" << 1);
BSONObjBuilder roleBuilder;
- ok = Grid::get(txn)->catalogClient(txn)->runUserManagementReadCommand(
- txn, "admin", rolesInfoCmd, &roleBuilder);
+ ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementReadCommand(
+ opCtx, "admin", rolesInfoCmd, &roleBuilder);
if (!ok) {
return true;
}
diff --git a/src/mongo/db/auth/authz_manager_external_state_s.h b/src/mongo/db/auth/authz_manager_external_state_s.h
index 9f9c5c12d2b..3ae98503aad 100644
--- a/src/mongo/db/auth/authz_manager_external_state_s.h
+++ b/src/mongo/db/auth/authz_manager_external_state_s.h
@@ -50,28 +50,28 @@ public:
AuthzManagerExternalStateMongos();
virtual ~AuthzManagerExternalStateMongos();
- virtual Status initialize(OperationContext* txn);
+ virtual Status initialize(OperationContext* opCtx);
std::unique_ptr<AuthzSessionExternalState> makeAuthzSessionExternalState(
AuthorizationManager* authzManager) override;
- virtual Status getStoredAuthorizationVersion(OperationContext* txn, int* outVersion);
- virtual Status getUserDescription(OperationContext* txn,
+ virtual Status getStoredAuthorizationVersion(OperationContext* opCtx, int* outVersion);
+ virtual Status getUserDescription(OperationContext* opCtx,
const UserName& userName,
BSONObj* result);
- virtual Status getRoleDescription(OperationContext* txn,
+ virtual Status getRoleDescription(OperationContext* opCtx,
const RoleName& roleName,
PrivilegeFormat showPrivileges,
BSONObj* result);
- virtual Status getRolesDescription(OperationContext* txn,
+ virtual Status getRolesDescription(OperationContext* opCtx,
const std::vector<RoleName>& roles,
PrivilegeFormat showPrivileges,
BSONObj* result);
- virtual Status getRoleDescriptionsForDB(OperationContext* txn,
+ virtual Status getRoleDescriptionsForDB(OperationContext* opCtx,
const std::string dbname,
PrivilegeFormat showPrivileges,
bool showBuiltinRoles,
std::vector<BSONObj>* result);
- bool hasAnyPrivilegeDocuments(OperationContext* txn) override;
+ bool hasAnyPrivilegeDocuments(OperationContext* opCtx) override;
};
} // namespace mongo
diff --git a/src/mongo/db/auth/authz_session_external_state.h b/src/mongo/db/auth/authz_session_external_state.h
index 5ce7ab155ed..ba43be8050b 100644
--- a/src/mongo/db/auth/authz_session_external_state.h
+++ b/src/mongo/db/auth/authz_session_external_state.h
@@ -73,7 +73,7 @@ public:
// Should be called at the beginning of every new request. This performs the checks
// necessary to determine if localhost connections should be given full access.
- virtual void startRequest(OperationContext* txn) = 0;
+ virtual void startRequest(OperationContext* opCtx) = 0;
protected:
// This class should never be instantiated directly.
diff --git a/src/mongo/db/auth/authz_session_external_state_d.cpp b/src/mongo/db/auth/authz_session_external_state_d.cpp
index 25298b2823b..2ea9386341e 100644
--- a/src/mongo/db/auth/authz_session_external_state_d.cpp
+++ b/src/mongo/db/auth/authz_session_external_state_d.cpp
@@ -46,11 +46,11 @@ AuthzSessionExternalStateMongod::AuthzSessionExternalStateMongod(AuthorizationMa
: AuthzSessionExternalStateServerCommon(authzManager) {}
AuthzSessionExternalStateMongod::~AuthzSessionExternalStateMongod() {}
-void AuthzSessionExternalStateMongod::startRequest(OperationContext* txn) {
+void AuthzSessionExternalStateMongod::startRequest(OperationContext* opCtx) {
// No locks should be held as this happens before any database accesses occur
- dassert(!txn->lockState()->isLocked());
+ dassert(!opCtx->lockState()->isLocked());
- _checkShouldAllowLocalhost(txn);
+ _checkShouldAllowLocalhost(opCtx);
}
bool AuthzSessionExternalStateMongod::shouldIgnoreAuthChecks() const {
diff --git a/src/mongo/db/auth/authz_session_external_state_d.h b/src/mongo/db/auth/authz_session_external_state_d.h
index 0761ad5ea39..dd861ac0dcc 100644
--- a/src/mongo/db/auth/authz_session_external_state_d.h
+++ b/src/mongo/db/auth/authz_session_external_state_d.h
@@ -49,7 +49,7 @@ public:
virtual bool serverIsArbiter() const;
- virtual void startRequest(OperationContext* txn);
+ virtual void startRequest(OperationContext* opCtx);
};
} // namespace mongo
diff --git a/src/mongo/db/auth/authz_session_external_state_mock.h b/src/mongo/db/auth/authz_session_external_state_mock.h
index f1aa1d7a166..d577d7b2e4c 100644
--- a/src/mongo/db/auth/authz_session_external_state_mock.h
+++ b/src/mongo/db/auth/authz_session_external_state_mock.h
@@ -68,7 +68,7 @@ public:
_allowLocalhostReturnValue = returnValue;
}
- virtual void startRequest(OperationContext* txn) {}
+ virtual void startRequest(OperationContext* opCtx) {}
private:
bool _ignoreAuthChecksReturnValue;
diff --git a/src/mongo/db/auth/authz_session_external_state_s.cpp b/src/mongo/db/auth/authz_session_external_state_s.cpp
index 9576bc79a2d..47ac9598735 100644
--- a/src/mongo/db/auth/authz_session_external_state_s.cpp
+++ b/src/mongo/db/auth/authz_session_external_state_s.cpp
@@ -42,8 +42,8 @@ AuthzSessionExternalStateMongos::AuthzSessionExternalStateMongos(AuthorizationMa
: AuthzSessionExternalStateServerCommon(authzManager) {}
AuthzSessionExternalStateMongos::~AuthzSessionExternalStateMongos() {}
-void AuthzSessionExternalStateMongos::startRequest(OperationContext* txn) {
- _checkShouldAllowLocalhost(txn);
+void AuthzSessionExternalStateMongos::startRequest(OperationContext* opCtx) {
+ _checkShouldAllowLocalhost(opCtx);
}
} // namespace mongo
diff --git a/src/mongo/db/auth/authz_session_external_state_s.h b/src/mongo/db/auth/authz_session_external_state_s.h
index 7db5078db2a..d8a4f81164e 100644
--- a/src/mongo/db/auth/authz_session_external_state_s.h
+++ b/src/mongo/db/auth/authz_session_external_state_s.h
@@ -45,7 +45,7 @@ public:
AuthzSessionExternalStateMongos(AuthorizationManager* authzManager);
virtual ~AuthzSessionExternalStateMongos();
- virtual void startRequest(OperationContext* txn);
+ virtual void startRequest(OperationContext* opCtx);
};
} // namespace mongo
diff --git a/src/mongo/db/auth/authz_session_external_state_server_common.cpp b/src/mongo/db/auth/authz_session_external_state_server_common.cpp
index faf039ba7dc..24d3b8ceecc 100644
--- a/src/mongo/db/auth/authz_session_external_state_server_common.cpp
+++ b/src/mongo/db/auth/authz_session_external_state_server_common.cpp
@@ -56,7 +56,7 @@ AuthzSessionExternalStateServerCommon::AuthzSessionExternalStateServerCommon(
: AuthzSessionExternalState(authzManager), _allowLocalhost(enableLocalhostAuthBypass) {}
AuthzSessionExternalStateServerCommon::~AuthzSessionExternalStateServerCommon() {}
-void AuthzSessionExternalStateServerCommon::_checkShouldAllowLocalhost(OperationContext* txn) {
+void AuthzSessionExternalStateServerCommon::_checkShouldAllowLocalhost(OperationContext* opCtx) {
if (!_authzManager->isAuthEnabled())
return;
// If we know that an admin user exists, don't re-check.
@@ -68,7 +68,7 @@ void AuthzSessionExternalStateServerCommon::_checkShouldAllowLocalhost(Operation
return;
}
- _allowLocalhost = !_authzManager->hasAnyPrivilegeDocuments(txn);
+ _allowLocalhost = !_authzManager->hasAnyPrivilegeDocuments(opCtx);
if (_allowLocalhost) {
std::call_once(checkShouldAllowLocalhostOnceFlag, []() {
log() << "note: no users configured in admin.system.users, allowing localhost "
diff --git a/src/mongo/db/auth/authz_session_external_state_server_common.h b/src/mongo/db/auth/authz_session_external_state_server_common.h
index df7ceb6c9f4..d46c78413ba 100644
--- a/src/mongo/db/auth/authz_session_external_state_server_common.h
+++ b/src/mongo/db/auth/authz_session_external_state_server_common.h
@@ -54,7 +54,7 @@ protected:
// Checks whether or not localhost connections should be given full access and stores the
// result in _allowLocalhost. Currently localhost connections are only given full access
// if there are no users in the admin database.
- void _checkShouldAllowLocalhost(OperationContext* txn);
+ void _checkShouldAllowLocalhost(OperationContext* opCtx);
private:
bool _allowLocalhost;
diff --git a/src/mongo/db/auth/impersonation_session.cpp b/src/mongo/db/auth/impersonation_session.cpp
index 791e3c56a0b..8b9a5f09fc7 100644
--- a/src/mongo/db/auth/impersonation_session.cpp
+++ b/src/mongo/db/auth/impersonation_session.cpp
@@ -45,11 +45,11 @@
namespace mongo {
-ImpersonationSessionGuard::ImpersonationSessionGuard(OperationContext* txn) : _txn(txn) {
- auto authSession = AuthorizationSession::get(_txn->getClient());
+ImpersonationSessionGuard::ImpersonationSessionGuard(OperationContext* opCtx) : _opCtx(opCtx) {
+ auto authSession = AuthorizationSession::get(_opCtx->getClient());
const auto& impersonatedUsersAndRoles =
- rpc::AuditMetadata::get(txn).getImpersonatedUsersAndRoles();
+ rpc::AuditMetadata::get(opCtx).getImpersonatedUsersAndRoles();
if (impersonatedUsersAndRoles != boost::none) {
uassert(ErrorCodes::Unauthorized,
@@ -66,8 +66,9 @@ ImpersonationSessionGuard::ImpersonationSessionGuard(OperationContext* txn) : _t
}
ImpersonationSessionGuard::~ImpersonationSessionGuard() {
- DESTRUCTOR_GUARD(
- if (_active) { AuthorizationSession::get(_txn->getClient())->clearImpersonatedUserData(); })
+ DESTRUCTOR_GUARD(if (_active) {
+ AuthorizationSession::get(_opCtx->getClient())->clearImpersonatedUserData();
+ })
}
} // namespace mongo
diff --git a/src/mongo/db/auth/impersonation_session.h b/src/mongo/db/auth/impersonation_session.h
index af2dab2f774..a2986cb0e86 100644
--- a/src/mongo/db/auth/impersonation_session.h
+++ b/src/mongo/db/auth/impersonation_session.h
@@ -39,11 +39,11 @@ class ImpersonationSessionGuard {
MONGO_DISALLOW_COPYING(ImpersonationSessionGuard);
public:
- ImpersonationSessionGuard(OperationContext* txn);
+ ImpersonationSessionGuard(OperationContext* opCtx);
~ImpersonationSessionGuard();
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
bool _active{false};
};
diff --git a/src/mongo/db/auth/role_graph.h b/src/mongo/db/auth/role_graph.h
index 42512312b64..4d8334b7710 100644
--- a/src/mongo/db/auth/role_graph.h
+++ b/src/mongo/db/auth/role_graph.h
@@ -242,7 +242,7 @@ public:
* operation is not supported, and other codes (typically BadValue) if the oplog operation
* is ill-described.
*/
- Status handleLogOp(OperationContext* txn,
+ Status handleLogOp(OperationContext* opCtx,
const char* op,
const NamespaceString& ns,
const BSONObj& o,
diff --git a/src/mongo/db/auth/role_graph_update.cpp b/src/mongo/db/auth/role_graph_update.cpp
index 4fdb6f6df22..3432890406c 100644
--- a/src/mongo/db/auth/role_graph_update.cpp
+++ b/src/mongo/db/auth/role_graph_update.cpp
@@ -164,7 +164,7 @@ Status handleOplogInsert(RoleGraph* roleGraph, const BSONObj& insertedObj) {
*
* Treats all updates as upserts.
*/
-Status handleOplogUpdate(OperationContext* txn,
+Status handleOplogUpdate(OperationContext* opCtx,
RoleGraph* roleGraph,
const BSONObj& updatePattern,
const BSONObj& queryPattern) {
@@ -183,7 +183,7 @@ Status handleOplogUpdate(OperationContext* txn,
status = AuthorizationManager::getBSONForRole(roleGraph, roleToUpdate, roleDocument.root());
if (status == ErrorCodes::RoleNotFound) {
// The query pattern will only contain _id, no other immutable fields are present
- status = driver.populateDocumentWithQueryFields(txn, queryPattern, NULL, roleDocument);
+ status = driver.populateDocumentWithQueryFields(opCtx, queryPattern, NULL, roleDocument);
}
if (!status.isOK())
return status;
@@ -278,7 +278,7 @@ Status RoleGraph::addRoleFromDocument(const BSONObj& doc) {
return status;
}
-Status RoleGraph::handleLogOp(OperationContext* txn,
+Status RoleGraph::handleLogOp(OperationContext* opCtx,
const char* op,
const NamespaceString& ns,
const BSONObj& o,
@@ -313,7 +313,7 @@ Status RoleGraph::handleLogOp(OperationContext* txn,
return Status(ErrorCodes::InternalError,
"Missing query pattern in update oplog entry.");
}
- return handleOplogUpdate(txn, this, o, *o2);
+ return handleOplogUpdate(opCtx, this, o, *o2);
case 'd':
return handleOplogDelete(this, o);
case 'n':
diff --git a/src/mongo/db/auth/sasl_authentication_session.cpp b/src/mongo/db/auth/sasl_authentication_session.cpp
index fdb7f024929..e449c6a7ae8 100644
--- a/src/mongo/db/auth/sasl_authentication_session.cpp
+++ b/src/mongo/db/auth/sasl_authentication_session.cpp
@@ -69,7 +69,7 @@ bool isAuthorizedCommon(SaslAuthenticationSession* session,
SaslAuthenticationSession::SaslAuthenticationSession(AuthorizationSession* authzSession)
: AuthenticationSession(AuthenticationSession::SESSION_TYPE_SASL),
- _txn(nullptr),
+ _opCtx(nullptr),
_authzSession(authzSession),
_saslStep(0),
_conversationId(0),
diff --git a/src/mongo/db/auth/sasl_authentication_session.h b/src/mongo/db/auth/sasl_authentication_session.h
index 0f531c1bb44..adaea792284 100644
--- a/src/mongo/db/auth/sasl_authentication_session.h
+++ b/src/mongo/db/auth/sasl_authentication_session.h
@@ -105,10 +105,10 @@ public:
* SaslAuthenticationSession.
*/
OperationContext* getOpCtxt() const {
- return _txn;
+ return _opCtx;
}
- void setOpCtxt(OperationContext* txn) {
- _txn = txn;
+ void setOpCtxt(OperationContext* opCtx) {
+ _opCtx = opCtx;
}
/**
@@ -167,7 +167,7 @@ public:
}
protected:
- OperationContext* _txn;
+ OperationContext* _opCtx;
AuthorizationSession* _authzSession;
std::string _authenticationDatabase;
std::string _serviceName;
diff --git a/src/mongo/db/auth/sasl_commands.cpp b/src/mongo/db/auth/sasl_commands.cpp
index b30cbf84340..85289515f95 100644
--- a/src/mongo/db/auth/sasl_commands.cpp
+++ b/src/mongo/db/auth/sasl_commands.cpp
@@ -73,7 +73,7 @@ public:
void redactForLogging(mutablebson::Document* cmdObj) override;
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& db,
BSONObj& cmdObj,
int options,
@@ -101,7 +101,7 @@ public:
const BSONObj&,
std::vector<Privilege>*) {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& db,
BSONObj& cmdObj,
int options,
@@ -270,7 +270,7 @@ void CmdSaslStart::redactForLogging(mutablebson::Document* cmdObj) {
}
}
-bool CmdSaslStart::run(OperationContext* txn,
+bool CmdSaslStart::run(OperationContext* opCtx,
const std::string& db,
BSONObj& cmdObj,
int options,
@@ -289,7 +289,7 @@ bool CmdSaslStart::run(OperationContext* txn,
std::unique_ptr<AuthenticationSession> sessionGuard(session);
- session->setOpCtxt(txn);
+ session->setOpCtxt(opCtx);
Status status = doSaslStart(client, session, db, cmdObj, &result);
appendCommandStatus(result, status);
@@ -312,7 +312,7 @@ void CmdSaslContinue::help(std::stringstream& os) const {
os << "Subsequent steps in a SASL authentication conversation.";
}
-bool CmdSaslContinue::run(OperationContext* txn,
+bool CmdSaslContinue::run(OperationContext* opCtx,
const std::string& db,
BSONObj& cmdObj,
int options,
@@ -339,7 +339,7 @@ bool CmdSaslContinue::run(OperationContext* txn,
"Attempt to switch database target during SASL authentication."));
}
- session->setOpCtxt(txn);
+ session->setOpCtxt(opCtx);
Status status = doSaslContinue(client, session, cmdObj, &result);
appendCommandStatus(result, status);
diff --git a/src/mongo/db/auth/sasl_scramsha1_test.cpp b/src/mongo/db/auth/sasl_scramsha1_test.cpp
index 0ef9dc68db7..99794ffdf29 100644
--- a/src/mongo/db/auth/sasl_scramsha1_test.cpp
+++ b/src/mongo/db/auth/sasl_scramsha1_test.cpp
@@ -218,7 +218,7 @@ protected:
ServiceContextNoop serviceContext;
ServiceContextNoop::UniqueClient client;
- ServiceContextNoop::UniqueOperationContext txn;
+ ServiceContextNoop::UniqueOperationContext opCtx;
AuthzManagerExternalStateMock* authzManagerExternalState;
std::unique_ptr<AuthorizationManager> authzManager;
@@ -229,7 +229,7 @@ protected:
void setUp() {
client = serviceContext.makeClient("test");
- txn = serviceContext.makeOperationContext(client.get());
+ opCtx = serviceContext.makeOperationContext(client.get());
auto uniqueAuthzManagerExternalStateMock =
stdx::make_unique<AuthzManagerExternalStateMock>();
@@ -240,7 +240,7 @@ protected:
stdx::make_unique<AuthzSessionExternalStateMock>(authzManager.get()));
saslServerSession = stdx::make_unique<NativeSaslAuthenticationSession>(authzSession.get());
- saslServerSession->setOpCtxt(txn.get());
+ saslServerSession->setOpCtxt(opCtx.get());
saslServerSession->start("test", "SCRAM-SHA-1", "mongodb", "MockServer.test", 1, false);
saslClientSession = stdx::make_unique<NativeSaslClientSession>();
saslClientSession->setParameter(NativeSaslClientSession::parameterMechanism, "SCRAM-SHA-1");
@@ -254,7 +254,7 @@ protected:
TEST_F(SCRAMSHA1Fixture, testServerStep1DoesNotIncludeNonceFromClientStep1) {
authzManagerExternalState->insertPrivilegeDocument(
- txn.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj());
+ opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj());
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "sajack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
@@ -277,7 +277,7 @@ TEST_F(SCRAMSHA1Fixture, testServerStep1DoesNotIncludeNonceFromClientStep1) {
TEST_F(SCRAMSHA1Fixture, testClientStep2DoesNotIncludeNonceFromServerStep1) {
authzManagerExternalState->insertPrivilegeDocument(
- txn.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj());
+ opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj());
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "sajack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
@@ -299,7 +299,7 @@ TEST_F(SCRAMSHA1Fixture, testClientStep2DoesNotIncludeNonceFromServerStep1) {
TEST_F(SCRAMSHA1Fixture, testClientStep2GivesBadProof) {
authzManagerExternalState->insertPrivilegeDocument(
- txn.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj());
+ opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj());
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "sajack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
@@ -324,7 +324,7 @@ TEST_F(SCRAMSHA1Fixture, testClientStep2GivesBadProof) {
TEST_F(SCRAMSHA1Fixture, testServerStep2GivesBadVerifier) {
authzManagerExternalState->insertPrivilegeDocument(
- txn.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj());
+ opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj());
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "sajack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
@@ -359,7 +359,7 @@ TEST_F(SCRAMSHA1Fixture, testServerStep2GivesBadVerifier) {
TEST_F(SCRAMSHA1Fixture, testSCRAM) {
authzManagerExternalState->insertPrivilegeDocument(
- txn.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj());
+ opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj());
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "sajack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
@@ -372,7 +372,7 @@ TEST_F(SCRAMSHA1Fixture, testSCRAM) {
TEST_F(SCRAMSHA1Fixture, testNULLInPassword) {
authzManagerExternalState->insertPrivilegeDocument(
- txn.get(), generateSCRAMUserDocument("sajack", "saj\0ack"), BSONObj());
+ opCtx.get(), generateSCRAMUserDocument("sajack", "saj\0ack"), BSONObj());
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "sajack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
@@ -386,7 +386,7 @@ TEST_F(SCRAMSHA1Fixture, testNULLInPassword) {
TEST_F(SCRAMSHA1Fixture, testCommasInUsernameAndPassword) {
authzManagerExternalState->insertPrivilegeDocument(
- txn.get(), generateSCRAMUserDocument("s,a,jack", "s,a,jack"), BSONObj());
+ opCtx.get(), generateSCRAMUserDocument("s,a,jack", "s,a,jack"), BSONObj());
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "s,a,jack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
@@ -411,7 +411,7 @@ TEST_F(SCRAMSHA1Fixture, testIncorrectUser) {
TEST_F(SCRAMSHA1Fixture, testIncorrectPassword) {
authzManagerExternalState->insertPrivilegeDocument(
- txn.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj());
+ opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj());
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "sajack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
@@ -427,7 +427,7 @@ TEST_F(SCRAMSHA1Fixture, testIncorrectPassword) {
TEST_F(SCRAMSHA1Fixture, testMONGODBCR) {
authzManagerExternalState->insertPrivilegeDocument(
- txn.get(), generateMONGODBCRUserDocument("sajack", "sajack"), BSONObj());
+ opCtx.get(), generateMONGODBCRUserDocument("sajack", "sajack"), BSONObj());
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "sajack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
diff --git a/src/mongo/db/auth/user_cache_invalidator_job.cpp b/src/mongo/db/auth/user_cache_invalidator_job.cpp
index a9198b52fe9..707fcb59e17 100644
--- a/src/mongo/db/auth/user_cache_invalidator_job.cpp
+++ b/src/mongo/db/auth/user_cache_invalidator_job.cpp
@@ -90,11 +90,11 @@ public:
} exportedIntervalParam;
-StatusWith<OID> getCurrentCacheGeneration(OperationContext* txn) {
+StatusWith<OID> getCurrentCacheGeneration(OperationContext* opCtx) {
try {
BSONObjBuilder result;
- const bool ok = grid.catalogClient(txn)->runUserManagementReadCommand(
- txn, "admin", BSON("_getUserCacheGeneration" << 1), &result);
+ const bool ok = grid.catalogClient(opCtx)->runUserManagementReadCommand(
+ opCtx, "admin", BSON("_getUserCacheGeneration" << 1), &result);
if (!ok) {
return getStatusFromCommandResult(result.obj());
}
@@ -117,8 +117,8 @@ UserCacheInvalidator::~UserCacheInvalidator() {
wait();
}
-void UserCacheInvalidator::initialize(OperationContext* txn) {
- StatusWith<OID> currentGeneration = getCurrentCacheGeneration(txn);
+void UserCacheInvalidator::initialize(OperationContext* opCtx) {
+ StatusWith<OID> currentGeneration = getCurrentCacheGeneration(opCtx);
if (currentGeneration.isOK()) {
_previousCacheGeneration = currentGeneration.getValue();
return;
@@ -157,8 +157,8 @@ void UserCacheInvalidator::run() {
break;
}
- auto txn = cc().makeOperationContext();
- StatusWith<OID> currentGeneration = getCurrentCacheGeneration(txn.get());
+ auto opCtx = cc().makeOperationContext();
+ StatusWith<OID> currentGeneration = getCurrentCacheGeneration(opCtx.get());
if (!currentGeneration.isOK()) {
if (currentGeneration.getStatus().code() == ErrorCodes::CommandNotFound) {
warning() << "_getUserCacheGeneration command not found on config server(s), "
diff --git a/src/mongo/db/auth/user_cache_invalidator_job.h b/src/mongo/db/auth/user_cache_invalidator_job.h
index e32c7f662a3..8e83531a54b 100644
--- a/src/mongo/db/auth/user_cache_invalidator_job.h
+++ b/src/mongo/db/auth/user_cache_invalidator_job.h
@@ -46,7 +46,7 @@ public:
UserCacheInvalidator(AuthorizationManager* authzManager);
~UserCacheInvalidator();
- void initialize(OperationContext* txn);
+ void initialize(OperationContext* opCtx);
protected:
virtual std::string name() const;
diff --git a/src/mongo/db/catalog/apply_ops.cpp b/src/mongo/db/catalog/apply_ops.cpp
index 12729f3fa31..5e4e00aefb0 100644
--- a/src/mongo/db/catalog/apply_ops.cpp
+++ b/src/mongo/db/catalog/apply_ops.cpp
@@ -89,12 +89,12 @@ bool canBeAtomic(const BSONObj& applyOpCmd) {
return true;
}
-Status _applyOps(OperationContext* txn,
+Status _applyOps(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& applyOpCmd,
BSONObjBuilder* result,
int* numApplied) {
- dassert(txn->lockState()->isLockHeldForMode(
+ dassert(opCtx->lockState()->isLockHeldForMode(
ResourceId(RESOURCE_GLOBAL, ResourceId::SINGLETON_GLOBAL), MODE_X));
BSONObj ops = applyOpCmd.firstElement().Obj();
@@ -107,10 +107,10 @@ Status _applyOps(OperationContext* txn,
BSONArrayBuilder ab;
const bool alwaysUpsert =
applyOpCmd.hasField("alwaysUpsert") ? applyOpCmd["alwaysUpsert"].trueValue() : true;
- const bool haveWrappingWUOW = txn->lockState()->inAWriteUnitOfWork();
+ const bool haveWrappingWUOW = opCtx->lockState()->inAWriteUnitOfWork();
{
- repl::UnreplicatedWritesBlock uwb(txn);
+ repl::UnreplicatedWritesBlock uwb(opCtx);
while (i.more()) {
BSONElement e = i.next();
@@ -132,18 +132,18 @@ Status _applyOps(OperationContext* txn,
if (haveWrappingWUOW) {
invariant(*opType != 'c');
- if (!dbHolder().get(txn, ns)) {
+ if (!dbHolder().get(opCtx, ns)) {
throw DBException(
"cannot create a database in atomic applyOps mode; will retry without "
"atomicity",
ErrorCodes::NamespaceNotFound);
}
- OldClientContext ctx(txn, ns);
- status = repl::applyOperation_inlock(txn, ctx.db(), opObj, alwaysUpsert);
+ OldClientContext ctx(opCtx, ns);
+ status = repl::applyOperation_inlock(opCtx, ctx.db(), opObj, alwaysUpsert);
if (!status.isOK())
return status;
- logOpForDbHash(txn, ns.c_str());
+ logOpForDbHash(opCtx, ns.c_str());
} else {
try {
// Run operations under a nested lock as a hack to prevent yielding.
@@ -156,25 +156,25 @@ Status _applyOps(OperationContext* txn,
//
// We do not have a wrapping WriteUnitOfWork so it is possible for a journal
// commit to happen with a subset of ops applied.
- Lock::GlobalWrite globalWriteLockDisallowTempRelease(txn->lockState());
+ Lock::GlobalWrite globalWriteLockDisallowTempRelease(opCtx->lockState());
// Ensures that yielding will not happen (see the comment above).
DEV {
Locker::LockSnapshot lockSnapshot;
- invariant(!txn->lockState()->saveLockStateAndUnlock(&lockSnapshot));
+ invariant(!opCtx->lockState()->saveLockStateAndUnlock(&lockSnapshot));
};
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
if (*opType == 'c') {
- status = repl::applyCommand_inlock(txn, opObj, true);
+ status = repl::applyCommand_inlock(opCtx, opObj, true);
} else {
- OldClientContext ctx(txn, ns);
+ OldClientContext ctx(opCtx, ns);
status =
- repl::applyOperation_inlock(txn, ctx.db(), opObj, alwaysUpsert);
+ repl::applyOperation_inlock(opCtx, ctx.db(), opObj, alwaysUpsert);
}
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "applyOps", ns);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "applyOps", ns);
} catch (const DBException& ex) {
ab.append(false);
result->append("applied", ++(*numApplied));
@@ -185,8 +185,8 @@ Status _applyOps(OperationContext* txn,
result->append("results", ab.arr());
return Status(ErrorCodes::UnknownError, ex.what());
}
- WriteUnitOfWork wuow(txn);
- logOpForDbHash(txn, ns.c_str());
+ WriteUnitOfWork wuow(opCtx);
+ logOpForDbHash(opCtx, ns.c_str());
wuow.commit();
}
@@ -203,7 +203,7 @@ Status _applyOps(OperationContext* txn,
result->append("results", ab.arr());
} // set replicatedWrites back to original value
- if (txn->writesAreReplicated()) {
+ if (opCtx->writesAreReplicated()) {
// We want this applied atomically on slaves
// so we re-wrap without the pre-condition for speed
@@ -227,7 +227,7 @@ Status _applyOps(OperationContext* txn,
auto opObserver = getGlobalServiceContext()->getOpObserver();
invariant(opObserver);
if (haveWrappingWUOW) {
- opObserver->onApplyOps(txn, tempNS, cmdRewritten);
+ opObserver->onApplyOps(opCtx, tempNS, cmdRewritten);
} else {
// When executing applyOps outside of a wrapping WriteUnitOfWOrk, always logOp the
// command regardless of whether the individial ops succeeded and rely on any
@@ -235,14 +235,14 @@ Status _applyOps(OperationContext* txn,
// has always done and is part of its "correct" behavior.
while (true) {
try {
- WriteUnitOfWork wunit(txn);
- opObserver->onApplyOps(txn, tempNS, cmdRewritten);
+ WriteUnitOfWork wunit(opCtx);
+ opObserver->onApplyOps(opCtx, tempNS, cmdRewritten);
wunit.commit();
break;
} catch (const WriteConflictException& wce) {
LOG(2) << "WriteConflictException while logging applyOps command, retrying.";
- txn->recoveryUnit()->abandonSnapshot();
+ opCtx->recoveryUnit()->abandonSnapshot();
continue;
}
}
@@ -256,8 +256,8 @@ Status _applyOps(OperationContext* txn,
return Status::OK();
}
-Status preconditionOK(OperationContext* txn, const BSONObj& applyOpCmd, BSONObjBuilder* result) {
- dassert(txn->lockState()->isLockHeldForMode(
+Status preconditionOK(OperationContext* opCtx, const BSONObj& applyOpCmd, BSONObjBuilder* result) {
+ dassert(opCtx->lockState()->isLockHeldForMode(
ResourceId(RESOURCE_GLOBAL, ResourceId::SINGLETON_GLOBAL), MODE_X));
if (applyOpCmd["preCondition"].type() == Array) {
@@ -274,11 +274,11 @@ Status preconditionOK(OperationContext* txn, const BSONObj& applyOpCmd, BSONObjB
return {ErrorCodes::InvalidNamespace, "invalid ns: " + nss.ns()};
}
- DBDirectClient db(txn);
+ DBDirectClient db(opCtx);
BSONObj realres = db.findOne(nss.ns(), preCondition["q"].Obj());
// Get collection default collation.
- Database* database = dbHolder().get(txn, nss.db());
+ Database* database = dbHolder().get(opCtx, nss.db());
if (!database) {
return {ErrorCodes::NamespaceNotFound,
"database in ns does not exist: " + nss.ns()};
@@ -305,43 +305,43 @@ Status preconditionOK(OperationContext* txn, const BSONObj& applyOpCmd, BSONObjB
}
} // namespace
-Status applyOps(OperationContext* txn,
+Status applyOps(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& applyOpCmd,
BSONObjBuilder* result) {
- ScopedTransaction scopedXact(txn, MODE_X);
- Lock::GlobalWrite globalWriteLock(txn->lockState());
+ ScopedTransaction scopedXact(opCtx, MODE_X);
+ Lock::GlobalWrite globalWriteLock(opCtx->lockState());
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(txn, dbName);
+ bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(opCtx, dbName);
if (userInitiatedWritesAndNotPrimary)
return Status(ErrorCodes::NotMaster,
str::stream() << "Not primary while applying ops to database " << dbName);
- Status preconditionStatus = preconditionOK(txn, applyOpCmd, result);
+ Status preconditionStatus = preconditionOK(opCtx, applyOpCmd, result);
if (!preconditionStatus.isOK()) {
return preconditionStatus;
}
int numApplied = 0;
if (!canBeAtomic(applyOpCmd))
- return _applyOps(txn, dbName, applyOpCmd, result, &numApplied);
+ return _applyOps(opCtx, dbName, applyOpCmd, result, &numApplied);
// Perform write ops atomically
try {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
numApplied = 0;
- uassertStatusOK(_applyOps(txn, dbName, applyOpCmd, result, &numApplied));
+ uassertStatusOK(_applyOps(opCtx, dbName, applyOpCmd, result, &numApplied));
wunit.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "applyOps", dbName);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "applyOps", dbName);
} catch (const DBException& ex) {
if (ex.getCode() == ErrorCodes::NamespaceNotFound) {
// Retry in non-atomic mode, since MMAP cannot implicitly create a new database
// within an active WriteUnitOfWork.
- return _applyOps(txn, dbName, applyOpCmd, result, &numApplied);
+ return _applyOps(opCtx, dbName, applyOpCmd, result, &numApplied);
}
BSONArrayBuilder ab;
++numApplied;
diff --git a/src/mongo/db/catalog/apply_ops.h b/src/mongo/db/catalog/apply_ops.h
index 588d3bb370b..3a742891573 100644
--- a/src/mongo/db/catalog/apply_ops.h
+++ b/src/mongo/db/catalog/apply_ops.h
@@ -37,7 +37,7 @@ class OperationContext;
* Applies ops contained in "applyOpCmd" and populates fields in "result" to be returned to the
* user.
*/
-Status applyOps(OperationContext* txn,
+Status applyOps(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& applyOpCmd,
BSONObjBuilder* result);
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index d6b11fe50dc..62b2eae8fbf 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -52,12 +52,12 @@
#include "mongo/util/scopeguard.h"
namespace mongo {
-Status emptyCapped(OperationContext* txn, const NamespaceString& collectionName) {
- ScopedTransaction scopedXact(txn, MODE_IX);
- AutoGetDb autoDb(txn, collectionName.db(), MODE_X);
+Status emptyCapped(OperationContext* opCtx, const NamespaceString& collectionName) {
+ ScopedTransaction scopedXact(opCtx, MODE_IX);
+ AutoGetDb autoDb(opCtx, collectionName.db(), MODE_X);
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, collectionName);
+ bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, collectionName);
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::NotMaster,
@@ -71,7 +71,7 @@ Status emptyCapped(OperationContext* txn, const NamespaceString& collectionName)
Collection* collection = db->getCollection(collectionName);
uassert(ErrorCodes::CommandNotSupportedOnView,
str::stream() << "emptycapped not supported on view: " << collectionName.ns(),
- collection || !db->getViewCatalog()->lookup(txn, collectionName.ns()));
+ collection || !db->getViewCatalog()->lookup(opCtx, collectionName.ns()));
massert(28584, "no such collection", collection);
if (collectionName.isSystem() && !collectionName.isSystemDotProfile()) {
@@ -96,21 +96,21 @@ Status emptyCapped(OperationContext* txn, const NamespaceString& collectionName)
BackgroundOperation::assertNoBgOpInProgForNs(collectionName.ns());
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
- Status status = collection->truncate(txn);
+ Status status = collection->truncate(opCtx);
if (!status.isOK()) {
return status;
}
- getGlobalServiceContext()->getOpObserver()->onEmptyCapped(txn, collection->ns());
+ getGlobalServiceContext()->getOpObserver()->onEmptyCapped(opCtx, collection->ns());
wuow.commit();
return Status::OK();
}
-Status cloneCollectionAsCapped(OperationContext* txn,
+Status cloneCollectionAsCapped(OperationContext* opCtx,
Database* db,
const std::string& shortFrom,
const std::string& shortTo,
@@ -121,7 +121,7 @@ Status cloneCollectionAsCapped(OperationContext* txn,
Collection* fromCollection = db->getCollection(fromNs);
if (!fromCollection) {
- if (db->getViewCatalog()->lookup(txn, fromNs)) {
+ if (db->getViewCatalog()->lookup(opCtx, fromNs)) {
return Status(ErrorCodes::CommandNotSupportedOnView,
str::stream() << "cloneCollectionAsCapped not supported for views: "
<< fromNs);
@@ -136,8 +136,8 @@ Status cloneCollectionAsCapped(OperationContext* txn,
// create new collection
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
const auto fromOptions =
- fromCollection->getCatalogEntry()->getCollectionOptions(txn).toBSON();
- OldClientContext ctx(txn, toNs);
+ fromCollection->getCatalogEntry()->getCollectionOptions(opCtx).toBSON();
+ OldClientContext ctx(opCtx, toNs);
BSONObjBuilder spec;
spec.appendBool("capped", true);
spec.append("size", size);
@@ -145,13 +145,13 @@ Status cloneCollectionAsCapped(OperationContext* txn,
spec.appendBool("temp", true);
spec.appendElementsUnique(fromOptions);
- WriteUnitOfWork wunit(txn);
- Status status = userCreateNS(txn, ctx.db(), toNs, spec.done());
+ WriteUnitOfWork wunit(opCtx);
+ Status status = userCreateNS(opCtx, ctx.db(), toNs, spec.done());
if (!status.isOK())
return status;
wunit.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "cloneCollectionAsCapped", fromNs);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "cloneCollectionAsCapped", fromNs);
Collection* toCollection = db->getCollection(toNs);
invariant(toCollection); // we created above
@@ -161,12 +161,12 @@ Status cloneCollectionAsCapped(OperationContext* txn,
long long allocatedSpaceGuess =
std::max(static_cast<long long>(size * 2),
- static_cast<long long>(toCollection->getRecordStore()->storageSize(txn) * 2));
+ static_cast<long long>(toCollection->getRecordStore()->storageSize(opCtx) * 2));
- long long excessSize = fromCollection->dataSize(txn) - allocatedSpaceGuess;
+ long long excessSize = fromCollection->dataSize(opCtx) - allocatedSpaceGuess;
std::unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(
- txn, fromNs, fromCollection, PlanExecutor::YIELD_MANUAL, InternalPlanner::FORWARD));
+ opCtx, fromNs, fromCollection, PlanExecutor::YIELD_MANUAL, InternalPlanner::FORWARD));
exec->setYieldPolicy(PlanExecutor::WRITE_CONFLICT_RETRY_ONLY, fromCollection);
@@ -174,7 +174,7 @@ Status cloneCollectionAsCapped(OperationContext* txn,
RecordId loc;
PlanExecutor::ExecState state = PlanExecutor::FAILURE; // suppress uninitialized warnings
- DisableDocumentValidation validationDisabler(txn);
+ DisableDocumentValidation validationDisabler(opCtx);
int retries = 0; // non-zero when retrying our last document.
while (true) {
@@ -205,30 +205,30 @@ Status cloneCollectionAsCapped(OperationContext* txn,
try {
// Make sure we are working with the latest version of the document.
- if (objToClone.snapshotId() != txn->recoveryUnit()->getSnapshotId() &&
- !fromCollection->findDoc(txn, loc, &objToClone)) {
+ if (objToClone.snapshotId() != opCtx->recoveryUnit()->getSnapshotId() &&
+ !fromCollection->findDoc(opCtx, loc, &objToClone)) {
// doc was deleted so don't clone it.
retries = 0;
continue;
}
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
OpDebug* const nullOpDebug = nullptr;
toCollection->insertDocument(
- txn, objToClone.value(), nullOpDebug, true, txn->writesAreReplicated());
+ opCtx, objToClone.value(), nullOpDebug, true, opCtx->writesAreReplicated());
wunit.commit();
// Go to the next document
retries = 0;
} catch (const WriteConflictException& wce) {
- CurOp::get(txn)->debug().writeConflicts++;
+ CurOp::get(opCtx)->debug().writeConflicts++;
retries++; // logAndBackoff expects this to be 1 on first call.
wce.logAndBackoff(retries, "cloneCollectionAsCapped", fromNs);
// Can't use WRITE_CONFLICT_RETRY_LOOP macros since we need to save/restore exec
// around call to abandonSnapshot.
exec->saveState();
- txn->recoveryUnit()->abandonSnapshot();
+ opCtx->recoveryUnit()->abandonSnapshot();
exec->restoreState(); // Handles any WCEs internally.
}
}
@@ -236,15 +236,17 @@ Status cloneCollectionAsCapped(OperationContext* txn,
invariant(false); // unreachable
}
-Status convertToCapped(OperationContext* txn, const NamespaceString& collectionName, double size) {
+Status convertToCapped(OperationContext* opCtx,
+ const NamespaceString& collectionName,
+ double size) {
StringData dbname = collectionName.db();
StringData shortSource = collectionName.coll();
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, collectionName.db(), MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ AutoGetDb autoDb(opCtx, collectionName.db(), MODE_X);
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, collectionName);
+ bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, collectionName);
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::NotMaster,
@@ -264,18 +266,18 @@ Status convertToCapped(OperationContext* txn, const NamespaceString& collectionN
std::string longTmpName = str::stream() << dbname << "." << shortTmpName;
if (db->getCollection(longTmpName)) {
- WriteUnitOfWork wunit(txn);
- Status status = db->dropCollection(txn, longTmpName);
+ WriteUnitOfWork wunit(opCtx);
+ Status status = db->dropCollection(opCtx, longTmpName);
if (!status.isOK())
return status;
}
- const bool shouldReplicateWrites = txn->writesAreReplicated();
- txn->setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, txn, shouldReplicateWrites);
+ const bool shouldReplicateWrites = opCtx->writesAreReplicated();
+ opCtx->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, opCtx, shouldReplicateWrites);
Status status =
- cloneCollectionAsCapped(txn, db, shortSource.toString(), shortTmpName, size, true);
+ cloneCollectionAsCapped(opCtx, db, shortSource.toString(), shortTmpName, size, true);
if (!status.isOK()) {
return status;
@@ -284,18 +286,18 @@ Status convertToCapped(OperationContext* txn, const NamespaceString& collectionN
verify(db->getCollection(longTmpName));
{
- WriteUnitOfWork wunit(txn);
- status = db->dropCollection(txn, collectionName.ns());
- txn->setReplicatedWrites(shouldReplicateWrites);
+ WriteUnitOfWork wunit(opCtx);
+ status = db->dropCollection(opCtx, collectionName.ns());
+ opCtx->setReplicatedWrites(shouldReplicateWrites);
if (!status.isOK())
return status;
- status = db->renameCollection(txn, longTmpName, collectionName.ns(), false);
+ status = db->renameCollection(opCtx, longTmpName, collectionName.ns(), false);
if (!status.isOK())
return status;
getGlobalServiceContext()->getOpObserver()->onConvertToCapped(
- txn, NamespaceString(collectionName), size);
+ opCtx, NamespaceString(collectionName), size);
wunit.commit();
}
diff --git a/src/mongo/db/catalog/capped_utils.h b/src/mongo/db/catalog/capped_utils.h
index f7be6dc427e..9ae6a41c505 100644
--- a/src/mongo/db/catalog/capped_utils.h
+++ b/src/mongo/db/catalog/capped_utils.h
@@ -36,12 +36,12 @@ class OperationContext;
/**
* Drops all documents contained in the capped collection, "collectionName".
*/
-Status emptyCapped(OperationContext* txn, const NamespaceString& collectionName);
+Status emptyCapped(OperationContext* opCtx, const NamespaceString& collectionName);
/**
* Clones the collection "shortFrom" to the capped collection "shortTo" with a size of "size".
*/
-Status cloneCollectionAsCapped(OperationContext* txn,
+Status cloneCollectionAsCapped(OperationContext* opCtx,
Database* db,
const std::string& shortFrom,
const std::string& shortTo,
@@ -51,5 +51,5 @@ Status cloneCollectionAsCapped(OperationContext* txn,
/**
* Converts the collection "collectionName" to a capped collection with a size of "size".
*/
-Status convertToCapped(OperationContext* txn, const NamespaceString& collectionName, double size);
+Status convertToCapped(OperationContext* opCtx, const NamespaceString& collectionName, double size);
} // namespace mongo
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index 61f2abc1e3f..fdbfd26677b 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -60,7 +60,7 @@ struct CollModRequest {
BSONElement noPadding = {};
};
-StatusWith<CollModRequest> parseCollModRequest(OperationContext* txn,
+StatusWith<CollModRequest> parseCollModRequest(OperationContext* opCtx,
const NamespaceString& nss,
Collection* coll,
const BSONObj& cmdObj) {
@@ -117,7 +117,7 @@ StatusWith<CollModRequest> parseCollModRequest(OperationContext* txn,
}
if (!indexName.empty()) {
- cmr.idx = coll->getIndexCatalog()->findIndexByName(txn, indexName);
+ cmr.idx = coll->getIndexCatalog()->findIndexByName(opCtx, indexName);
if (!cmr.idx) {
return Status(ErrorCodes::IndexNotFound,
str::stream() << "cannot find index " << indexName << " for ns "
@@ -125,7 +125,8 @@ StatusWith<CollModRequest> parseCollModRequest(OperationContext* txn,
}
} else {
std::vector<IndexDescriptor*> indexes;
- coll->getIndexCatalog()->findIndexesByKeyPattern(txn, keyPattern, false, &indexes);
+ coll->getIndexCatalog()->findIndexesByKeyPattern(
+ opCtx, keyPattern, false, &indexes);
if (indexes.size() > 1) {
return Status(ErrorCodes::AmbiguousIndexKeyPattern,
@@ -214,20 +215,20 @@ StatusWith<CollModRequest> parseCollModRequest(OperationContext* txn,
return {std::move(cmr)};
}
-Status collMod(OperationContext* txn,
+Status collMod(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& cmdObj,
BSONObjBuilder* result) {
StringData dbName = nss.db();
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, dbName, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ AutoGetDb autoDb(opCtx, dbName, MODE_X);
Database* const db = autoDb.getDb();
Collection* coll = db ? db->getCollection(nss) : nullptr;
// May also modify a view instead of a collection.
boost::optional<ViewDefinition> view;
if (db && !coll) {
- const auto sharedView = db->getViewCatalog()->lookup(txn, nss.ns());
+ const auto sharedView = db->getViewCatalog()->lookup(opCtx, nss.ns());
if (sharedView) {
// We copy the ViewDefinition as it is modified below to represent the requested state.
view = {*sharedView};
@@ -243,10 +244,10 @@ Status collMod(OperationContext* txn,
return Status(ErrorCodes::NamespaceNotFound, "ns does not exist");
}
- OldClientContext ctx(txn, nss.ns());
+ OldClientContext ctx(opCtx, nss.ns());
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, nss);
+ bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, nss);
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::NotMaster,
@@ -254,14 +255,14 @@ Status collMod(OperationContext* txn,
<< nss.ns());
}
- auto statusW = parseCollModRequest(txn, nss, coll, cmdObj);
+ auto statusW = parseCollModRequest(opCtx, nss, coll, cmdObj);
if (!statusW.isOK()) {
return statusW.getStatus();
}
CollModRequest cmr = statusW.getValue();
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
if (view) {
if (!cmr.viewPipeLine.eoo())
@@ -276,7 +277,8 @@ Status collMod(OperationContext* txn,
for (auto& item : view->pipeline()) {
pipeline.append(item);
}
- auto errorStatus = catalog->modifyView(txn, nss, view->viewOn(), BSONArray(pipeline.obj()));
+ auto errorStatus =
+ catalog->modifyView(opCtx, nss, view->viewOn(), BSONArray(pipeline.obj()));
if (!errorStatus.isOK()) {
return errorStatus;
}
@@ -289,21 +291,21 @@ Status collMod(OperationContext* txn,
result->appendAs(oldExpireSecs, "expireAfterSeconds_old");
// Change the value of "expireAfterSeconds" on disk.
coll->getCatalogEntry()->updateTTLSetting(
- txn, cmr.idx->indexName(), newExpireSecs.safeNumberLong());
+ opCtx, cmr.idx->indexName(), newExpireSecs.safeNumberLong());
// Notify the index catalog that the definition of this index changed.
- cmr.idx = coll->getIndexCatalog()->refreshEntry(txn, cmr.idx);
+ cmr.idx = coll->getIndexCatalog()->refreshEntry(opCtx, cmr.idx);
result->appendAs(newExpireSecs, "expireAfterSeconds_new");
}
}
if (!cmr.collValidator.eoo())
- coll->setValidator(txn, cmr.collValidator.Obj());
+ coll->setValidator(opCtx, cmr.collValidator.Obj());
if (!cmr.collValidationAction.empty())
- coll->setValidationAction(txn, cmr.collValidationAction);
+ coll->setValidationAction(opCtx, cmr.collValidationAction);
if (!cmr.collValidationLevel.empty())
- coll->setValidationLevel(txn, cmr.collValidationLevel);
+ coll->setValidationLevel(opCtx, cmr.collValidationLevel);
auto setCollectionOption = [&](BSONElement& COElement) {
typedef CollectionOptions CO;
@@ -315,7 +317,7 @@ Status collMod(OperationContext* txn,
CollectionCatalogEntry* cce = coll->getCatalogEntry();
- const int oldFlags = cce->getCollectionOptions(txn).flags;
+ const int oldFlags = cce->getCollectionOptions(opCtx).flags;
const bool oldSetting = oldFlags & flag;
const bool newSetting = COElement.trueValue();
@@ -327,9 +329,9 @@ Status collMod(OperationContext* txn,
// NOTE we do this unconditionally to ensure that we note that the user has
// explicitly set flags, even if they are just setting the default.
- cce->updateFlags(txn, newFlags);
+ cce->updateFlags(opCtx, newFlags);
- const CollectionOptions newOptions = cce->getCollectionOptions(txn);
+ const CollectionOptions newOptions = cce->getCollectionOptions(opCtx);
invariant(newOptions.flags == newFlags);
invariant(newOptions.flagsSet);
};
@@ -345,7 +347,7 @@ Status collMod(OperationContext* txn,
// Only observe non-view collMods, as view operations are observed as operations on the
// system.views collection.
getGlobalServiceContext()->getOpObserver()->onCollMod(
- txn, (dbName.toString() + ".$cmd").c_str(), cmdObj);
+ opCtx, (dbName.toString() + ".$cmd").c_str(), cmdObj);
}
wunit.commit();
diff --git a/src/mongo/db/catalog/coll_mod.h b/src/mongo/db/catalog/coll_mod.h
index bb86bbc50d1..9442fc7d82e 100644
--- a/src/mongo/db/catalog/coll_mod.h
+++ b/src/mongo/db/catalog/coll_mod.h
@@ -38,7 +38,7 @@ class OperationContext;
struct CollModRequest;
-StatusWith<CollModRequest> parseCollModRequest(OperationContext* txn,
+StatusWith<CollModRequest> parseCollModRequest(OperationContext* opCtx,
const NamespaceString& nss,
Collection* coll,
const BSONObj& cmdObj);
@@ -46,7 +46,7 @@ StatusWith<CollModRequest> parseCollModRequest(OperationContext* txn,
/**
* Performs the collection modification described in "cmdObj" on the collection "ns".
*/
-Status collMod(OperationContext* txn,
+Status collMod(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& cmdObj,
BSONObjBuilder* result);
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 1edad781f0c..8ef63ac7ad7 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -104,7 +104,7 @@ Status checkValidatorForBannedExpressions(const BSONObj& validator) {
// Uses the collator factory to convert the BSON representation of a collator to a
// CollatorInterface. Returns null if the BSONObj is empty. We expect the stored collation to be
// valid, since it gets validated on collection create.
-std::unique_ptr<CollatorInterface> parseCollation(OperationContext* txn,
+std::unique_ptr<CollatorInterface> parseCollation(OperationContext* opCtx,
const NamespaceString& nss,
BSONObj collationSpec) {
if (collationSpec.isEmpty()) {
@@ -112,7 +112,7 @@ std::unique_ptr<CollatorInterface> parseCollation(OperationContext* txn,
}
auto collator =
- CollatorFactoryInterface::get(txn->getServiceContext())->makeFromBSON(collationSpec);
+ CollatorFactoryInterface::get(opCtx->getServiceContext())->makeFromBSON(collationSpec);
// If the collection's default collator has a version not currently supported by our ICU
// integration, shut down the server. Errors other than IncompatibleCollationVersion should not
@@ -209,7 +209,7 @@ bool CappedInsertNotifier::isDead() {
// ----
-Collection::Collection(OperationContext* txn,
+Collection::Collection(OperationContext* opCtx,
StringData fullNS,
CollectionCatalogEntry* details,
RecordStore* recordStore,
@@ -221,23 +221,23 @@ Collection::Collection(OperationContext* txn,
_needCappedLock(supportsDocLocking() && _recordStore->isCapped() && _ns.db() != "local"),
_infoCache(this),
_indexCatalog(this),
- _collator(parseCollation(txn, _ns, _details->getCollectionOptions(txn).collation)),
- _validatorDoc(_details->getCollectionOptions(txn).validator.getOwned()),
+ _collator(parseCollation(opCtx, _ns, _details->getCollectionOptions(opCtx).collation)),
+ _validatorDoc(_details->getCollectionOptions(opCtx).validator.getOwned()),
_validator(uassertStatusOK(parseValidator(_validatorDoc))),
_validationAction(uassertStatusOK(
- parseValidationAction(_details->getCollectionOptions(txn).validationAction))),
+ parseValidationAction(_details->getCollectionOptions(opCtx).validationAction))),
_validationLevel(uassertStatusOK(
- parseValidationLevel(_details->getCollectionOptions(txn).validationLevel))),
+ parseValidationLevel(_details->getCollectionOptions(opCtx).validationLevel))),
_cursorManager(fullNS),
_cappedNotifier(_recordStore->isCapped() ? new CappedInsertNotifier() : nullptr),
_mustTakeCappedLockOnInsert(isCapped() && !_ns.isSystemDotProfile() && !_ns.isOplog()) {
_magic = 1357924;
- _indexCatalog.init(txn);
+ _indexCatalog.init(opCtx);
if (isCapped())
_recordStore->setCappedCallback(this);
- _infoCache.init(txn);
+ _infoCache.init(opCtx);
}
Collection::~Collection() {
@@ -265,45 +265,45 @@ bool Collection::requiresIdIndex() const {
return true;
}
-std::unique_ptr<SeekableRecordCursor> Collection::getCursor(OperationContext* txn,
+std::unique_ptr<SeekableRecordCursor> Collection::getCursor(OperationContext* opCtx,
bool forward) const {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
invariant(ok());
- return _recordStore->getCursor(txn, forward);
+ return _recordStore->getCursor(opCtx, forward);
}
-vector<std::unique_ptr<RecordCursor>> Collection::getManyCursors(OperationContext* txn) const {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
+vector<std::unique_ptr<RecordCursor>> Collection::getManyCursors(OperationContext* opCtx) const {
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
- return _recordStore->getManyCursors(txn);
+ return _recordStore->getManyCursors(opCtx);
}
-Snapshotted<BSONObj> Collection::docFor(OperationContext* txn, const RecordId& loc) const {
- return Snapshotted<BSONObj>(txn->recoveryUnit()->getSnapshotId(),
- _recordStore->dataFor(txn, loc).releaseToBson());
+Snapshotted<BSONObj> Collection::docFor(OperationContext* opCtx, const RecordId& loc) const {
+ return Snapshotted<BSONObj>(opCtx->recoveryUnit()->getSnapshotId(),
+ _recordStore->dataFor(opCtx, loc).releaseToBson());
}
-bool Collection::findDoc(OperationContext* txn,
+bool Collection::findDoc(OperationContext* opCtx,
const RecordId& loc,
Snapshotted<BSONObj>* out) const {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
RecordData rd;
- if (!_recordStore->findRecord(txn, loc, &rd))
+ if (!_recordStore->findRecord(opCtx, loc, &rd))
return false;
- *out = Snapshotted<BSONObj>(txn->recoveryUnit()->getSnapshotId(), rd.releaseToBson());
+ *out = Snapshotted<BSONObj>(opCtx->recoveryUnit()->getSnapshotId(), rd.releaseToBson());
return true;
}
-Status Collection::checkValidation(OperationContext* txn, const BSONObj& document) const {
+Status Collection::checkValidation(OperationContext* opCtx, const BSONObj& document) const {
if (!_validator)
return Status::OK();
if (_validationLevel == OFF)
return Status::OK();
- if (documentValidationDisabled(txn))
+ if (documentValidationDisabled(opCtx))
return Status::OK();
if (_validator->matchesBSON(document))
@@ -349,10 +349,10 @@ StatusWithMatchExpression Collection::parseValidator(const BSONObj& validator) c
return statusWithMatcher;
}
-Status Collection::insertDocumentsForOplog(OperationContext* txn,
+Status Collection::insertDocumentsForOplog(OperationContext* opCtx,
const DocWriter* const* docs,
size_t nDocs) {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
// Since this is only for the OpLog, we can assume these for simplicity.
// This also means that we do not need to forward this object to the OpObserver, which is good
@@ -361,17 +361,17 @@ Status Collection::insertDocumentsForOplog(OperationContext* txn,
invariant(!_indexCatalog.haveAnyIndexes());
invariant(!_mustTakeCappedLockOnInsert);
- Status status = _recordStore->insertRecordsWithDocWriter(txn, docs, nDocs);
+ Status status = _recordStore->insertRecordsWithDocWriter(opCtx, docs, nDocs);
if (!status.isOK())
return status;
- txn->recoveryUnit()->onCommit([this]() { notifyCappedWaitersIfNeeded(); });
+ opCtx->recoveryUnit()->onCommit([this]() { notifyCappedWaitersIfNeeded(); });
return status;
}
-Status Collection::insertDocuments(OperationContext* txn,
+Status Collection::insertDocuments(OperationContext* opCtx,
const vector<BSONObj>::const_iterator begin,
const vector<BSONObj>::const_iterator end,
OpDebug* opDebug,
@@ -392,7 +392,7 @@ Status Collection::insertDocuments(OperationContext* txn,
}
// Should really be done in the collection object at creation and updated on index create.
- const bool hasIdIndex = _indexCatalog.findIdIndex(txn);
+ const bool hasIdIndex = _indexCatalog.findIdIndex(opCtx);
for (auto it = begin; it != end; it++) {
if (hasIdIndex && (*it)["_id"].eoo()) {
@@ -402,39 +402,39 @@ Status Collection::insertDocuments(OperationContext* txn,
<< _ns.ns());
}
- auto status = checkValidation(txn, *it);
+ auto status = checkValidation(opCtx, *it);
if (!status.isOK())
return status;
}
- const SnapshotId sid = txn->recoveryUnit()->getSnapshotId();
+ const SnapshotId sid = opCtx->recoveryUnit()->getSnapshotId();
if (_mustTakeCappedLockOnInsert)
- synchronizeOnCappedInFlightResource(txn->lockState(), _ns);
+ synchronizeOnCappedInFlightResource(opCtx->lockState(), _ns);
- Status status = _insertDocuments(txn, begin, end, enforceQuota, opDebug);
+ Status status = _insertDocuments(opCtx, begin, end, enforceQuota, opDebug);
if (!status.isOK())
return status;
- invariant(sid == txn->recoveryUnit()->getSnapshotId());
+ invariant(sid == opCtx->recoveryUnit()->getSnapshotId());
- getGlobalServiceContext()->getOpObserver()->onInserts(txn, ns(), begin, end, fromMigrate);
+ getGlobalServiceContext()->getOpObserver()->onInserts(opCtx, ns(), begin, end, fromMigrate);
- txn->recoveryUnit()->onCommit([this]() { notifyCappedWaitersIfNeeded(); });
+ opCtx->recoveryUnit()->onCommit([this]() { notifyCappedWaitersIfNeeded(); });
return Status::OK();
}
-Status Collection::insertDocument(OperationContext* txn,
+Status Collection::insertDocument(OperationContext* opCtx,
const BSONObj& docToInsert,
OpDebug* opDebug,
bool enforceQuota,
bool fromMigrate) {
vector<BSONObj> docs;
docs.push_back(docToInsert);
- return insertDocuments(txn, docs.begin(), docs.end(), opDebug, enforceQuota, fromMigrate);
+ return insertDocuments(opCtx, docs.begin(), docs.end(), opDebug, enforceQuota, fromMigrate);
}
-Status Collection::insertDocument(OperationContext* txn,
+Status Collection::insertDocument(OperationContext* opCtx,
const BSONObj& doc,
const std::vector<MultiIndexBlock*>& indexBlocks,
bool enforceQuota) {
@@ -453,18 +453,18 @@ Status Collection::insertDocument(OperationContext* txn,
}
{
- auto status = checkValidation(txn, doc);
+ auto status = checkValidation(opCtx, doc);
if (!status.isOK())
return status;
}
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
if (_mustTakeCappedLockOnInsert)
- synchronizeOnCappedInFlightResource(txn->lockState(), _ns);
+ synchronizeOnCappedInFlightResource(opCtx->lockState(), _ns);
- StatusWith<RecordId> loc =
- _recordStore->insertRecord(txn, doc.objdata(), doc.objsize(), _enforceQuota(enforceQuota));
+ StatusWith<RecordId> loc = _recordStore->insertRecord(
+ opCtx, doc.objdata(), doc.objsize(), _enforceQuota(enforceQuota));
if (!loc.isOK())
return loc.getStatus();
@@ -480,19 +480,19 @@ Status Collection::insertDocument(OperationContext* txn,
docs.push_back(doc);
getGlobalServiceContext()->getOpObserver()->onInserts(
- txn, ns(), docs.begin(), docs.end(), false);
+ opCtx, ns(), docs.begin(), docs.end(), false);
- txn->recoveryUnit()->onCommit([this]() { notifyCappedWaitersIfNeeded(); });
+ opCtx->recoveryUnit()->onCommit([this]() { notifyCappedWaitersIfNeeded(); });
return loc.getStatus();
}
-Status Collection::_insertDocuments(OperationContext* txn,
+Status Collection::_insertDocuments(OperationContext* opCtx,
const vector<BSONObj>::const_iterator begin,
const vector<BSONObj>::const_iterator end,
bool enforceQuota,
OpDebug* opDebug) {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
const size_t count = std::distance(begin, end);
if (isCapped() && _indexCatalog.haveAnyIndexes() && count > 1) {
@@ -509,7 +509,7 @@ Status Collection::_insertDocuments(OperationContext* txn,
// prevents the primary from executing with more concurrency than secondaries.
// See SERVER-21646.
Lock::ResourceLock heldUntilEndOfWUOW{
- txn->lockState(), ResourceId(RESOURCE_METADATA, _ns.ns()), MODE_X};
+ opCtx->lockState(), ResourceId(RESOURCE_METADATA, _ns.ns()), MODE_X};
}
std::vector<Record> records;
@@ -518,7 +518,7 @@ Status Collection::_insertDocuments(OperationContext* txn,
Record record = {RecordId(), RecordData(it->objdata(), it->objsize())};
records.push_back(record);
}
- Status status = _recordStore->insertRecords(txn, &records, _enforceQuota(enforceQuota));
+ Status status = _recordStore->insertRecords(opCtx, &records, _enforceQuota(enforceQuota));
if (!status.isOK())
return status;
@@ -535,7 +535,7 @@ Status Collection::_insertDocuments(OperationContext* txn,
}
int64_t keysInserted;
- status = _indexCatalog.indexRecords(txn, bsonRecords, &keysInserted);
+ status = _indexCatalog.indexRecords(opCtx, bsonRecords, &keysInserted);
if (opDebug) {
opDebug->keysInserted += keysInserted;
}
@@ -551,15 +551,15 @@ void Collection::notifyCappedWaitersIfNeeded() {
_cappedNotifier->notifyAll();
}
-Status Collection::aboutToDeleteCapped(OperationContext* txn,
+Status Collection::aboutToDeleteCapped(OperationContext* opCtx,
const RecordId& loc,
RecordData data) {
/* check if any cursors point to us. if so, advance them. */
- _cursorManager.invalidateDocument(txn, loc, INVALIDATION_DELETION);
+ _cursorManager.invalidateDocument(opCtx, loc, INVALIDATION_DELETION);
BSONObj doc = data.releaseToBson();
int64_t* const nullKeysDeleted = nullptr;
- _indexCatalog.unindexRecord(txn, doc, loc, false, nullKeysDeleted);
+ _indexCatalog.unindexRecord(opCtx, doc, loc, false, nullKeysDeleted);
// We are not capturing and reporting to OpDebug the 'keysDeleted' by unindexRecord(). It is
// questionable whether reporting will add diagnostic value to users and may instead be
@@ -571,37 +571,37 @@ Status Collection::aboutToDeleteCapped(OperationContext* txn,
}
void Collection::deleteDocument(
- OperationContext* txn, const RecordId& loc, OpDebug* opDebug, bool fromMigrate, bool noWarn) {
+ OperationContext* opCtx, const RecordId& loc, OpDebug* opDebug, bool fromMigrate, bool noWarn) {
if (isCapped()) {
log() << "failing remove on a capped ns " << _ns;
uasserted(10089, "cannot remove from a capped collection");
return;
}
- Snapshotted<BSONObj> doc = docFor(txn, loc);
+ Snapshotted<BSONObj> doc = docFor(opCtx, loc);
auto deleteState =
- getGlobalServiceContext()->getOpObserver()->aboutToDelete(txn, ns(), doc.value());
+ getGlobalServiceContext()->getOpObserver()->aboutToDelete(opCtx, ns(), doc.value());
/* check if any cursors point to us. if so, advance them. */
- _cursorManager.invalidateDocument(txn, loc, INVALIDATION_DELETION);
+ _cursorManager.invalidateDocument(opCtx, loc, INVALIDATION_DELETION);
int64_t keysDeleted;
- _indexCatalog.unindexRecord(txn, doc.value(), loc, noWarn, &keysDeleted);
+ _indexCatalog.unindexRecord(opCtx, doc.value(), loc, noWarn, &keysDeleted);
if (opDebug) {
opDebug->keysDeleted += keysDeleted;
}
- _recordStore->deleteRecord(txn, loc);
+ _recordStore->deleteRecord(opCtx, loc);
getGlobalServiceContext()->getOpObserver()->onDelete(
- txn, ns(), std::move(deleteState), fromMigrate);
+ opCtx, ns(), std::move(deleteState), fromMigrate);
}
Counter64 moveCounter;
ServerStatusMetricField<Counter64> moveCounterDisplay("record.moves", &moveCounter);
-StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
+StatusWith<RecordId> Collection::updateDocument(OperationContext* opCtx,
const RecordId& oldLocation,
const Snapshotted<BSONObj>& oldDoc,
const BSONObj& newDoc,
@@ -610,13 +610,13 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
OpDebug* opDebug,
OplogUpdateEntryArgs* args) {
{
- auto status = checkValidation(txn, newDoc);
+ auto status = checkValidation(opCtx, newDoc);
if (!status.isOK()) {
if (_validationLevel == STRICT_V) {
return status;
}
// moderate means we have to check the old doc
- auto oldDocStatus = checkValidation(txn, oldDoc.value());
+ auto oldDocStatus = checkValidation(opCtx, oldDoc.value());
if (oldDocStatus.isOK()) {
// transitioning from good -> bad is not ok
return status;
@@ -625,8 +625,8 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
}
}
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
- invariant(oldDoc.snapshotId() == txn->recoveryUnit()->getSnapshotId());
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ invariant(oldDoc.snapshotId() == opCtx->recoveryUnit()->getSnapshotId());
invariant(newDoc.isOwned());
if (_needCappedLock) {
@@ -634,10 +634,10 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
// prevents the primary from executing with more concurrency than secondaries.
// See SERVER-21646.
Lock::ResourceLock heldUntilEndOfWUOW{
- txn->lockState(), ResourceId(RESOURCE_METADATA, _ns.ns()), MODE_X};
+ opCtx->lockState(), ResourceId(RESOURCE_METADATA, _ns.ns()), MODE_X};
}
- SnapshotId sid = txn->recoveryUnit()->getSnapshotId();
+ SnapshotId sid = opCtx->recoveryUnit()->getSnapshotId();
BSONElement oldId = oldDoc.value()["_id"];
if (!oldId.eoo() && SimpleBSONElementComparator::kInstance.evaluate(oldId != newDoc["_id"]))
@@ -664,17 +664,17 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
// newDoc.
OwnedPointerMap<IndexDescriptor*, UpdateTicket> updateTickets;
if (indexesAffected) {
- IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(txn, true);
+ IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(opCtx, true);
while (ii.more()) {
IndexDescriptor* descriptor = ii.next();
IndexCatalogEntry* entry = ii.catalogEntry(descriptor);
IndexAccessMethod* iam = ii.accessMethod(descriptor);
InsertDeleteOptions options;
- IndexCatalog::prepareInsertDeleteOptions(txn, descriptor, &options);
+ IndexCatalog::prepareInsertDeleteOptions(opCtx, descriptor, &options);
UpdateTicket* updateTicket = new UpdateTicket();
updateTickets.mutableMap()[descriptor] = updateTicket;
- Status ret = iam->validateUpdate(txn,
+ Status ret = iam->validateUpdate(opCtx,
oldDoc.value(),
newDoc,
oldLocation,
@@ -688,18 +688,18 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
}
Status updateStatus = _recordStore->updateRecord(
- txn, oldLocation, newDoc.objdata(), newDoc.objsize(), _enforceQuota(enforceQuota), this);
+ opCtx, oldLocation, newDoc.objdata(), newDoc.objsize(), _enforceQuota(enforceQuota), this);
if (updateStatus == ErrorCodes::NeedsDocumentMove) {
return _updateDocumentWithMove(
- txn, oldLocation, oldDoc, newDoc, enforceQuota, opDebug, args, sid);
+ opCtx, oldLocation, oldDoc, newDoc, enforceQuota, opDebug, args, sid);
} else if (!updateStatus.isOK()) {
return updateStatus;
}
// Object did not move. We update each index with each respective UpdateTicket.
if (indexesAffected) {
- IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(txn, true);
+ IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(opCtx, true);
while (ii.more()) {
IndexDescriptor* descriptor = ii.next();
IndexAccessMethod* iam = ii.accessMethod(descriptor);
@@ -707,7 +707,7 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
int64_t keysInserted;
int64_t keysDeleted;
Status ret = iam->update(
- txn, *updateTickets.mutableMap()[descriptor], &keysInserted, &keysDeleted);
+ opCtx, *updateTickets.mutableMap()[descriptor], &keysInserted, &keysDeleted);
if (!ret.isOK())
return StatusWith<RecordId>(ret);
if (opDebug) {
@@ -717,15 +717,15 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
}
}
- invariant(sid == txn->recoveryUnit()->getSnapshotId());
+ invariant(sid == opCtx->recoveryUnit()->getSnapshotId());
args->updatedDoc = newDoc;
- getGlobalServiceContext()->getOpObserver()->onUpdate(txn, *args);
+ getGlobalServiceContext()->getOpObserver()->onUpdate(opCtx, *args);
return {oldLocation};
}
-StatusWith<RecordId> Collection::_updateDocumentWithMove(OperationContext* txn,
+StatusWith<RecordId> Collection::_updateDocumentWithMove(OperationContext* opCtx,
const RecordId& oldLocation,
const Snapshotted<BSONObj>& oldDoc,
const BSONObj& newDoc,
@@ -735,21 +735,21 @@ StatusWith<RecordId> Collection::_updateDocumentWithMove(OperationContext* txn,
const SnapshotId& sid) {
// Insert new record.
StatusWith<RecordId> newLocation = _recordStore->insertRecord(
- txn, newDoc.objdata(), newDoc.objsize(), _enforceQuota(enforceQuota));
+ opCtx, newDoc.objdata(), newDoc.objsize(), _enforceQuota(enforceQuota));
if (!newLocation.isOK()) {
return newLocation;
}
invariant(newLocation.getValue() != oldLocation);
- _cursorManager.invalidateDocument(txn, oldLocation, INVALIDATION_DELETION);
+ _cursorManager.invalidateDocument(opCtx, oldLocation, INVALIDATION_DELETION);
// Remove indexes for old record.
int64_t keysDeleted;
- _indexCatalog.unindexRecord(txn, oldDoc.value(), oldLocation, true, &keysDeleted);
+ _indexCatalog.unindexRecord(opCtx, oldDoc.value(), oldLocation, true, &keysDeleted);
// Remove old record.
- _recordStore->deleteRecord(txn, oldLocation);
+ _recordStore->deleteRecord(opCtx, oldLocation);
std::vector<BsonRecord> bsonRecords;
BsonRecord bsonRecord = {newLocation.getValue(), &newDoc};
@@ -757,15 +757,15 @@ StatusWith<RecordId> Collection::_updateDocumentWithMove(OperationContext* txn,
// Add indexes for new record.
int64_t keysInserted;
- Status status = _indexCatalog.indexRecords(txn, bsonRecords, &keysInserted);
+ Status status = _indexCatalog.indexRecords(opCtx, bsonRecords, &keysInserted);
if (!status.isOK()) {
return StatusWith<RecordId>(status);
}
- invariant(sid == txn->recoveryUnit()->getSnapshotId());
+ invariant(sid == opCtx->recoveryUnit()->getSnapshotId());
args->updatedDoc = newDoc;
- getGlobalServiceContext()->getOpObserver()->onUpdate(txn, *args);
+ getGlobalServiceContext()->getOpObserver()->onUpdate(opCtx, *args);
moveCounter.increment();
if (opDebug) {
@@ -777,9 +777,9 @@ StatusWith<RecordId> Collection::_updateDocumentWithMove(OperationContext* txn,
return newLocation;
}
-Status Collection::recordStoreGoingToUpdateInPlace(OperationContext* txn, const RecordId& loc) {
+Status Collection::recordStoreGoingToUpdateInPlace(OperationContext* opCtx, const RecordId& loc) {
// Broadcast the mutation so that query results stay correct.
- _cursorManager.invalidateDocument(txn, loc, INVALIDATION_MUTATION);
+ _cursorManager.invalidateDocument(opCtx, loc, INVALIDATION_MUTATION);
return Status::OK();
}
@@ -792,26 +792,26 @@ bool Collection::updateWithDamagesSupported() const {
}
StatusWith<RecordData> Collection::updateDocumentWithDamages(
- OperationContext* txn,
+ OperationContext* opCtx,
const RecordId& loc,
const Snapshotted<RecordData>& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages,
OplogUpdateEntryArgs* args) {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
- invariant(oldRec.snapshotId() == txn->recoveryUnit()->getSnapshotId());
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ invariant(oldRec.snapshotId() == opCtx->recoveryUnit()->getSnapshotId());
invariant(updateWithDamagesSupported());
// Broadcast the mutation so that query results stay correct.
- _cursorManager.invalidateDocument(txn, loc, INVALIDATION_MUTATION);
+ _cursorManager.invalidateDocument(opCtx, loc, INVALIDATION_MUTATION);
auto newRecStatus =
- _recordStore->updateWithDamages(txn, loc, oldRec.value(), damageSource, damages);
+ _recordStore->updateWithDamages(opCtx, loc, oldRec.value(), damageSource, damages);
if (newRecStatus.isOK()) {
args->updatedDoc = newRecStatus.getValue().toBson();
- getGlobalServiceContext()->getOpObserver()->onUpdate(txn, *args);
+ getGlobalServiceContext()->getOpObserver()->onUpdate(opCtx, *args);
}
return newRecStatus;
}
@@ -841,12 +841,12 @@ std::shared_ptr<CappedInsertNotifier> Collection::getCappedInsertNotifier() cons
return _cappedNotifier;
}
-uint64_t Collection::numRecords(OperationContext* txn) const {
- return _recordStore->numRecords(txn);
+uint64_t Collection::numRecords(OperationContext* opCtx) const {
+ return _recordStore->numRecords(opCtx);
}
-uint64_t Collection::dataSize(OperationContext* txn) const {
- return _recordStore->dataSize(txn);
+uint64_t Collection::dataSize(OperationContext* opCtx) const {
+ return _recordStore->dataSize(opCtx);
}
uint64_t Collection::getIndexSize(OperationContext* opCtx, BSONObjBuilder* details, int scale) {
@@ -878,15 +878,15 @@ uint64_t Collection::getIndexSize(OperationContext* opCtx, BSONObjBuilder* detai
* 3) truncate record store
* 4) re-write indexes
*/
-Status Collection::truncate(OperationContext* txn) {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
+Status Collection::truncate(OperationContext* opCtx) {
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
BackgroundOperation::assertNoBgOpInProgForNs(ns());
- invariant(_indexCatalog.numIndexesInProgress(txn) == 0);
+ invariant(_indexCatalog.numIndexesInProgress(opCtx) == 0);
// 1) store index specs
vector<BSONObj> indexSpecs;
{
- IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(txn, false);
+ IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(opCtx, false);
while (ii.more()) {
const IndexDescriptor* idx = ii.next();
indexSpecs.push_back(idx->infoObj().getOwned());
@@ -894,19 +894,19 @@ Status Collection::truncate(OperationContext* txn) {
}
// 2) drop indexes
- Status status = _indexCatalog.dropAllIndexes(txn, true);
+ Status status = _indexCatalog.dropAllIndexes(opCtx, true);
if (!status.isOK())
return status;
_cursorManager.invalidateAll(false, "collection truncated");
// 3) truncate record store
- status = _recordStore->truncate(txn);
+ status = _recordStore->truncate(opCtx);
if (!status.isOK())
return status;
// 4) re-create indexes
for (size_t i = 0; i < indexSpecs.size(); i++) {
- status = _indexCatalog.createIndexOnEmptyCollection(txn, indexSpecs[i]).getStatus();
+ status = _indexCatalog.createIndexOnEmptyCollection(opCtx, indexSpecs[i]).getStatus();
if (!status.isOK())
return status;
}
@@ -914,18 +914,18 @@ Status Collection::truncate(OperationContext* txn) {
return Status::OK();
}
-void Collection::cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive) {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+void Collection::cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) {
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
invariant(isCapped());
BackgroundOperation::assertNoBgOpInProgForNs(ns());
- invariant(_indexCatalog.numIndexesInProgress(txn) == 0);
+ invariant(_indexCatalog.numIndexesInProgress(opCtx) == 0);
_cursorManager.invalidateAll(false, "capped collection truncated");
- _recordStore->cappedTruncateAfter(txn, end, inclusive);
+ _recordStore->cappedTruncateAfter(opCtx, end, inclusive);
}
-Status Collection::setValidator(OperationContext* txn, BSONObj validatorDoc) {
- invariant(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
+Status Collection::setValidator(OperationContext* opCtx, BSONObj validatorDoc) {
+ invariant(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
// Make owned early so that the parsed match expression refers to the owned object.
if (!validatorDoc.isOwned())
@@ -935,7 +935,7 @@ Status Collection::setValidator(OperationContext* txn, BSONObj validatorDoc) {
if (!statusWithMatcher.isOK())
return statusWithMatcher.getStatus();
- _details->updateValidator(txn, validatorDoc, getValidationLevel(), getValidationAction());
+ _details->updateValidator(opCtx, validatorDoc, getValidationLevel(), getValidationAction());
_validator = std::move(statusWithMatcher.getValue());
_validatorDoc = std::move(validatorDoc);
@@ -994,8 +994,8 @@ StringData Collection::getValidationAction() const {
MONGO_UNREACHABLE;
}
-Status Collection::setValidationLevel(OperationContext* txn, StringData newLevel) {
- invariant(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
+Status Collection::setValidationLevel(OperationContext* opCtx, StringData newLevel) {
+ invariant(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
StatusWith<ValidationLevel> status = parseValidationLevel(newLevel);
if (!status.isOK()) {
@@ -1004,13 +1004,13 @@ Status Collection::setValidationLevel(OperationContext* txn, StringData newLevel
_validationLevel = status.getValue();
- _details->updateValidator(txn, _validatorDoc, getValidationLevel(), getValidationAction());
+ _details->updateValidator(opCtx, _validatorDoc, getValidationLevel(), getValidationAction());
return Status::OK();
}
-Status Collection::setValidationAction(OperationContext* txn, StringData newAction) {
- invariant(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
+Status Collection::setValidationAction(OperationContext* opCtx, StringData newAction) {
+ invariant(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
StatusWith<ValidationAction> status = parseValidationAction(newAction);
if (!status.isOK()) {
@@ -1019,7 +1019,7 @@ Status Collection::setValidationAction(OperationContext* txn, StringData newActi
_validationAction = status.getValue();
- _details->updateValidator(txn, _validatorDoc, getValidationLevel(), getValidationAction());
+ _details->updateValidator(opCtx, _validatorDoc, getValidationLevel(), getValidationAction());
return Status::OK();
}
@@ -1037,11 +1037,11 @@ using ValidateResultsMap = std::map<std::string, ValidateResults>;
class RecordStoreValidateAdaptor : public ValidateAdaptor {
public:
- RecordStoreValidateAdaptor(OperationContext* txn,
+ RecordStoreValidateAdaptor(OperationContext* opCtx,
ValidateCmdLevel level,
IndexCatalog* ic,
ValidateResultsMap* irm)
- : _txn(txn), _level(level), _indexCatalog(ic), _indexNsResultsMap(irm) {
+ : _opCtx(opCtx), _level(level), _indexCatalog(ic), _indexNsResultsMap(irm) {
_ikc = std::unique_ptr<IndexKeyCountTable>(new IndexKeyCountTable());
}
@@ -1068,7 +1068,7 @@ public:
return status;
}
- IndexCatalog::IndexIterator i = _indexCatalog->getIndexIterator(_txn, false);
+ IndexCatalog::IndexIterator i = _indexCatalog->getIndexIterator(_opCtx, false);
while (i.more()) {
const IndexDescriptor* descriptor = i.next();
@@ -1097,7 +1097,7 @@ public:
&documentKeySet,
multikeyPaths);
- if (!descriptor->isMultikey(_txn) && documentKeySet.size() > 1) {
+ if (!descriptor->isMultikey(_opCtx) && documentKeySet.size() > 1) {
string msg = str::stream() << "Index " << descriptor->indexName()
<< " is not multi-key but has more than one"
<< " key in document " << recordId;
@@ -1158,7 +1158,7 @@ public:
BSONObj prevIndexEntryKey;
bool isFirstEntry = true;
- std::unique_ptr<SortedDataInterface::Cursor> cursor = iam->newCursor(_txn, true);
+ std::unique_ptr<SortedDataInterface::Cursor> cursor = iam->newCursor(_opCtx, true);
// Seeking to BSONObj() is equivalent to seeking to the first entry of an index.
for (auto indexEntry = cursor->seek(BSONObj(), true); indexEntry;
indexEntry = cursor->next()) {
@@ -1206,7 +1206,7 @@ public:
}
}
- if (results.valid && !idx->isMultikey(_txn) && totalKeys > numRecs) {
+ if (results.valid && !idx->isMultikey(_opCtx) && totalKeys > numRecs) {
string err = str::stream()
<< "index " << idx->indexName() << " is not multi-key, but has more entries ("
<< numIndexedKeys << ") than documents in the index (" << numRecs - numLongKeys
@@ -1249,7 +1249,7 @@ private:
uint32_t _indexKeyCountTableNumEntries = 0;
bool _hasDocWithoutIndexEntry = false;
- OperationContext* _txn; // Not owned.
+ OperationContext* _opCtx; // Not owned.
ValidateCmdLevel _level;
IndexCatalog* _indexCatalog; // Not owned.
ValidateResultsMap* _indexNsResultsMap; // Not owned.
@@ -1264,30 +1264,30 @@ private:
};
} // namespace
-Status Collection::validate(OperationContext* txn,
+Status Collection::validate(OperationContext* opCtx,
ValidateCmdLevel level,
ValidateResults* results,
BSONObjBuilder* output) {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
try {
ValidateResultsMap indexNsResultsMap;
std::unique_ptr<RecordStoreValidateAdaptor> indexValidator(
- new RecordStoreValidateAdaptor(txn, level, &_indexCatalog, &indexNsResultsMap));
+ new RecordStoreValidateAdaptor(opCtx, level, &_indexCatalog, &indexNsResultsMap));
BSONObjBuilder keysPerIndex; // not using subObjStart to be exception safe
- IndexCatalog::IndexIterator i = _indexCatalog.getIndexIterator(txn, false);
+ IndexCatalog::IndexIterator i = _indexCatalog.getIndexIterator(opCtx, false);
// Validate Indexes.
while (i.more()) {
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
const IndexDescriptor* descriptor = i.next();
log(LogComponent::kIndex) << "validating index " << descriptor->indexNamespace()
<< endl;
IndexAccessMethod* iam = _indexCatalog.getIndex(descriptor);
ValidateResults curIndexResults;
int64_t numKeys;
- iam->validate(txn, &numKeys, &curIndexResults);
+ iam->validate(opCtx, &numKeys, &curIndexResults);
keysPerIndex.appendNumber(descriptor->indexNamespace(),
static_cast<long long>(numKeys));
@@ -1302,7 +1302,8 @@ Status Collection::validate(OperationContext* txn,
// Validate RecordStore and, if `level == kValidateFull`, cross validate indexes and
// RecordStore.
if (results->valid) {
- auto status = _recordStore->validate(txn, level, indexValidator.get(), results, output);
+ auto status =
+ _recordStore->validate(opCtx, level, indexValidator.get(), results, output);
// RecordStore::validate always returns Status::OK(). Errors are reported through
// `results`.
dassert(status.isOK());
@@ -1323,14 +1324,14 @@ Status Collection::validate(OperationContext* txn,
// Validate index key count.
if (results->valid) {
- IndexCatalog::IndexIterator i = _indexCatalog.getIndexIterator(txn, false);
+ IndexCatalog::IndexIterator i = _indexCatalog.getIndexIterator(opCtx, false);
while (i.more()) {
IndexDescriptor* descriptor = i.next();
ValidateResults& curIndexResults = indexNsResultsMap[descriptor->indexNamespace()];
if (curIndexResults.valid) {
indexValidator->validateIndexKeyCount(
- descriptor, _recordStore->numRecords(txn), curIndexResults);
+ descriptor, _recordStore->numRecords(opCtx), curIndexResults);
}
}
}
@@ -1365,7 +1366,7 @@ Status Collection::validate(OperationContext* txn,
results->errors.insert(results->errors.end(), vr.errors.begin(), vr.errors.end());
}
- output->append("nIndexes", _indexCatalog.numIndexesReady(txn));
+ output->append("nIndexes", _indexCatalog.numIndexesReady(opCtx));
output->append("keysPerIndex", keysPerIndex.done());
if (indexDetails.get()) {
output->append("indexDetails", indexDetails->done());
@@ -1382,13 +1383,13 @@ Status Collection::validate(OperationContext* txn,
return Status::OK();
}
-Status Collection::touch(OperationContext* txn,
+Status Collection::touch(OperationContext* opCtx,
bool touchData,
bool touchIndexes,
BSONObjBuilder* output) const {
if (touchData) {
BSONObjBuilder b;
- Status status = _recordStore->touch(txn, &b);
+ Status status = _recordStore->touch(opCtx, &b);
if (!status.isOK())
return status;
output->append("data", b.obj());
@@ -1396,17 +1397,18 @@ Status Collection::touch(OperationContext* txn,
if (touchIndexes) {
Timer t;
- IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(txn, false);
+ IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(opCtx, false);
while (ii.more()) {
const IndexDescriptor* desc = ii.next();
const IndexAccessMethod* iam = _indexCatalog.getIndex(desc);
- Status status = iam->touch(txn);
+ Status status = iam->touch(opCtx);
if (!status.isOK())
return status;
}
- output->append("indexes",
- BSON("num" << _indexCatalog.numIndexesTotal(txn) << "millis" << t.millis()));
+ output->append(
+ "indexes",
+ BSON("num" << _indexCatalog.numIndexesTotal(opCtx) << "millis" << t.millis()));
}
return Status::OK();
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 6e9b1c56b1b..0d04abc5c76 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -178,7 +178,7 @@ private:
*/
class Collection final : CappedCallback, UpdateNotifier {
public:
- Collection(OperationContext* txn,
+ Collection(OperationContext* opCtx,
StringData fullNS,
CollectionCatalogEntry* details, // does not own
RecordStore* recordStore, // does not own
@@ -228,22 +228,22 @@ public:
bool requiresIdIndex() const;
- Snapshotted<BSONObj> docFor(OperationContext* txn, const RecordId& loc) const;
+ Snapshotted<BSONObj> docFor(OperationContext* opCtx, const RecordId& loc) const;
/**
* @param out - contents set to the right docs if exists, or nothing.
* @return true iff loc exists
*/
- bool findDoc(OperationContext* txn, const RecordId& loc, Snapshotted<BSONObj>* out) const;
+ bool findDoc(OperationContext* opCtx, const RecordId& loc, Snapshotted<BSONObj>* out) const;
- std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* txn,
+ std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* opCtx,
bool forward = true) const;
/**
* Returns many cursors that partition the Collection into many disjoint sets. Iterating
* all returned cursors is equivalent to iterating the full collection.
*/
- std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* txn) const;
+ std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* opCtx) const;
/**
* Deletes the document with the given RecordId from the collection.
@@ -257,7 +257,7 @@ public:
* 'noWarn' if unindexing the record causes an error, if noWarn is true the error
* will not be logged.
*/
- void deleteDocument(OperationContext* txn,
+ void deleteDocument(OperationContext* opCtx,
const RecordId& loc,
OpDebug* opDebug,
bool fromMigrate = false,
@@ -270,7 +270,7 @@ public:
*
* 'opDebug' Optional argument. When not null, will be used to record operation statistics.
*/
- Status insertDocuments(OperationContext* txn,
+ Status insertDocuments(OperationContext* opCtx,
std::vector<BSONObj>::const_iterator begin,
std::vector<BSONObj>::const_iterator end,
OpDebug* opDebug,
@@ -284,7 +284,7 @@ public:
* 'opDebug' Optional argument. When not null, will be used to record operation statistics.
* 'enforceQuota' If false, quotas will be ignored.
*/
- Status insertDocument(OperationContext* txn,
+ Status insertDocument(OperationContext* opCtx,
const BSONObj& doc,
OpDebug* opDebug,
bool enforceQuota,
@@ -294,7 +294,7 @@ public:
* Callers must ensure no document validation is performed for this collection when calling
* this method.
*/
- Status insertDocumentsForOplog(OperationContext* txn,
+ Status insertDocumentsForOplog(OperationContext* opCtx,
const DocWriter* const* docs,
size_t nDocs);
@@ -303,7 +303,7 @@ public:
*
* NOTE: It is up to caller to commit the indexes.
*/
- Status insertDocument(OperationContext* txn,
+ Status insertDocument(OperationContext* opCtx,
const BSONObj& doc,
const std::vector<MultiIndexBlock*>& indexBlocks,
bool enforceQuota);
@@ -317,7 +317,7 @@ public:
* 'opDebug' Optional argument. When not null, will be used to record operation statistics.
* @return the post update location of the doc (may or may not be the same as oldLocation)
*/
- StatusWith<RecordId> updateDocument(OperationContext* txn,
+ StatusWith<RecordId> updateDocument(OperationContext* opCtx,
const RecordId& oldLocation,
const Snapshotted<BSONObj>& oldDoc,
const BSONObj& newDoc,
@@ -335,7 +335,7 @@ public:
* success.
* @return the contents of the updated record.
*/
- StatusWith<RecordData> updateDocumentWithDamages(OperationContext* txn,
+ StatusWith<RecordData> updateDocumentWithDamages(OperationContext* opCtx,
const RecordId& loc,
const Snapshotted<RecordData>& oldRec,
const char* damageSource,
@@ -344,21 +344,21 @@ public:
// -----------
- StatusWith<CompactStats> compact(OperationContext* txn, const CompactOptions* options);
+ StatusWith<CompactStats> compact(OperationContext* opCtx, const CompactOptions* options);
/**
* removes all documents as fast as possible
* indexes before and after will be the same
* as will other characteristics
*/
- Status truncate(OperationContext* txn);
+ Status truncate(OperationContext* opCtx);
/**
* @return OK if the validate run successfully
* OK will be returned even if corruption is found
* deatils will be in result
*/
- Status validate(OperationContext* txn,
+ Status validate(OperationContext* opCtx,
ValidateCmdLevel level,
ValidateResults* results,
BSONObjBuilder* output);
@@ -366,7 +366,7 @@ public:
/**
* forces data into cache
*/
- Status touch(OperationContext* txn,
+ Status touch(OperationContext* opCtx,
bool touchData,
bool touchIndexes,
BSONObjBuilder* output) const;
@@ -377,7 +377,7 @@ public:
* function. An assertion will be thrown if that is attempted.
* @param inclusive - Truncate 'end' as well iff true
*/
- void cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive);
+ void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive);
enum ValidationAction { WARN, ERROR_V };
enum ValidationLevel { OFF, MODERATE, STRICT_V };
@@ -395,10 +395,10 @@ public:
* An empty validator removes all validation.
* Requires an exclusive lock on the collection.
*/
- Status setValidator(OperationContext* txn, BSONObj validator);
+ Status setValidator(OperationContext* opCtx, BSONObj validator);
- Status setValidationLevel(OperationContext* txn, StringData newLevel);
- Status setValidationAction(OperationContext* txn, StringData newAction);
+ Status setValidationLevel(OperationContext* opCtx, StringData newLevel);
+ Status setValidationAction(OperationContext* opCtx, StringData newAction);
StringData getValidationLevel() const;
StringData getValidationAction() const;
@@ -419,15 +419,15 @@ public:
*/
std::shared_ptr<CappedInsertNotifier> getCappedInsertNotifier() const;
- uint64_t numRecords(OperationContext* txn) const;
+ uint64_t numRecords(OperationContext* opCtx) const;
- uint64_t dataSize(OperationContext* txn) const;
+ uint64_t dataSize(OperationContext* opCtx) const;
- int averageObjectSize(OperationContext* txn) const {
- uint64_t n = numRecords(txn);
+ int averageObjectSize(OperationContext* opCtx) const {
+ uint64_t n = numRecords(opCtx);
if (n == 0)
return 5;
- return static_cast<int>(dataSize(txn) / n);
+ return static_cast<int>(dataSize(opCtx) / n);
}
uint64_t getIndexSize(OperationContext* opCtx, BSONObjBuilder* details = NULL, int scale = 1);
@@ -459,20 +459,20 @@ private:
/**
* Returns a non-ok Status if document does not pass this collection's validator.
*/
- Status checkValidation(OperationContext* txn, const BSONObj& document) const;
+ Status checkValidation(OperationContext* opCtx, const BSONObj& document) const;
- Status recordStoreGoingToUpdateInPlace(OperationContext* txn, const RecordId& loc);
+ Status recordStoreGoingToUpdateInPlace(OperationContext* opCtx, const RecordId& loc);
- Status aboutToDeleteCapped(OperationContext* txn, const RecordId& loc, RecordData data);
+ Status aboutToDeleteCapped(OperationContext* opCtx, const RecordId& loc, RecordData data);
/**
* same semantics as insertDocument, but doesn't do:
* - some user error checks
* - adjust padding
*/
- Status _insertDocument(OperationContext* txn, const BSONObj& doc, bool enforceQuota);
+ Status _insertDocument(OperationContext* opCtx, const BSONObj& doc, bool enforceQuota);
- Status _insertDocuments(OperationContext* txn,
+ Status _insertDocuments(OperationContext* opCtx,
std::vector<BSONObj>::const_iterator begin,
std::vector<BSONObj>::const_iterator end,
bool enforceQuota,
@@ -482,7 +482,7 @@ private:
/**
* Perform update when document move will be required.
*/
- StatusWith<RecordId> _updateDocumentWithMove(OperationContext* txn,
+ StatusWith<RecordId> _updateDocumentWithMove(OperationContext* opCtx,
const RecordId& oldLocation,
const Snapshotted<BSONObj>& oldDoc,
const BSONObj& newDoc,
diff --git a/src/mongo/db/catalog/collection_catalog_entry.h b/src/mongo/db/catalog/collection_catalog_entry.h
index 879459839d3..16e7002db05 100644
--- a/src/mongo/db/catalog/collection_catalog_entry.h
+++ b/src/mongo/db/catalog/collection_catalog_entry.h
@@ -52,17 +52,17 @@ public:
// ------- indexes ----------
- virtual CollectionOptions getCollectionOptions(OperationContext* txn) const = 0;
+ virtual CollectionOptions getCollectionOptions(OperationContext* opCtx) const = 0;
- virtual int getTotalIndexCount(OperationContext* txn) const = 0;
+ virtual int getTotalIndexCount(OperationContext* opCtx) const = 0;
- virtual int getCompletedIndexCount(OperationContext* txn) const = 0;
+ virtual int getCompletedIndexCount(OperationContext* opCtx) const = 0;
virtual int getMaxAllowedIndexes() const = 0;
- virtual void getAllIndexes(OperationContext* txn, std::vector<std::string>* names) const = 0;
+ virtual void getAllIndexes(OperationContext* opCtx, std::vector<std::string>* names) const = 0;
- virtual BSONObj getIndexSpec(OperationContext* txn, StringData idxName) const = 0;
+ virtual BSONObj getIndexSpec(OperationContext* opCtx, StringData idxName) const = 0;
/**
* Returns true if the index identified by 'indexName' is multikey, and returns false otherwise.
@@ -75,7 +75,7 @@ public:
* multikey information, then 'multikeyPaths' is initialized as a vector with size equal to the
* number of elements in the index key pattern of empty sets.
*/
- virtual bool isIndexMultikey(OperationContext* txn,
+ virtual bool isIndexMultikey(OperationContext* opCtx,
StringData indexName,
MultikeyPaths* multikeyPaths) const = 0;
@@ -88,29 +88,29 @@ public:
*
* This function returns true if the index metadata has changed, and returns false otherwise.
*/
- virtual bool setIndexIsMultikey(OperationContext* txn,
+ virtual bool setIndexIsMultikey(OperationContext* opCtx,
StringData indexName,
const MultikeyPaths& multikeyPaths) = 0;
- virtual RecordId getIndexHead(OperationContext* txn, StringData indexName) const = 0;
+ virtual RecordId getIndexHead(OperationContext* opCtx, StringData indexName) const = 0;
- virtual void setIndexHead(OperationContext* txn,
+ virtual void setIndexHead(OperationContext* opCtx,
StringData indexName,
const RecordId& newHead) = 0;
- virtual bool isIndexReady(OperationContext* txn, StringData indexName) const = 0;
+ virtual bool isIndexReady(OperationContext* opCtx, StringData indexName) const = 0;
- virtual Status removeIndex(OperationContext* txn, StringData indexName) = 0;
+ virtual Status removeIndex(OperationContext* opCtx, StringData indexName) = 0;
- virtual Status prepareForIndexBuild(OperationContext* txn, const IndexDescriptor* spec) = 0;
+ virtual Status prepareForIndexBuild(OperationContext* opCtx, const IndexDescriptor* spec) = 0;
- virtual void indexBuildSuccess(OperationContext* txn, StringData indexName) = 0;
+ virtual void indexBuildSuccess(OperationContext* opCtx, StringData indexName) = 0;
/* Updates the expireAfterSeconds field of the given index to the value in newExpireSecs.
* The specified index must already contain an expireAfterSeconds field, and the value in
* that field and newExpireSecs must both be numeric.
*/
- virtual void updateTTLSetting(OperationContext* txn,
+ virtual void updateTTLSetting(OperationContext* opCtx,
StringData idxName,
long long newExpireSeconds) = 0;
@@ -118,14 +118,14 @@ public:
* Sets the flags field of CollectionOptions to newValue.
* Subsequent calls to getCollectionOptions should have flags==newValue and flagsSet==true.
*/
- virtual void updateFlags(OperationContext* txn, int newValue) = 0;
+ virtual void updateFlags(OperationContext* opCtx, int newValue) = 0;
/**
* Updates the validator for this collection.
*
* An empty validator removes all validation.
*/
- virtual void updateValidator(OperationContext* txn,
+ virtual void updateValidator(OperationContext* opCtx,
const BSONObj& validator,
StringData validationLevel,
StringData validationAction) = 0;
diff --git a/src/mongo/db/catalog/collection_compact.cpp b/src/mongo/db/catalog/collection_compact.cpp
index 4c64eb8f239..69fd76bcfe9 100644
--- a/src/mongo/db/catalog/collection_compact.cpp
+++ b/src/mongo/db/catalog/collection_compact.cpp
@@ -107,11 +107,11 @@ private:
}
-StatusWith<CompactStats> Collection::compact(OperationContext* txn,
+StatusWith<CompactStats> Collection::compact(OperationContext* opCtx,
const CompactOptions* compactOptions) {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
- DisableDocumentValidation validationDisabler(txn);
+ DisableDocumentValidation validationDisabler(opCtx);
if (!_recordStore->compactSupported())
return StatusWith<CompactStats>(ErrorCodes::CommandNotSupported,
@@ -121,18 +121,18 @@ StatusWith<CompactStats> Collection::compact(OperationContext* txn,
if (_recordStore->compactsInPlace()) {
CompactStats stats;
- Status status = _recordStore->compact(txn, NULL, compactOptions, &stats);
+ Status status = _recordStore->compact(opCtx, NULL, compactOptions, &stats);
if (!status.isOK())
return StatusWith<CompactStats>(status);
// Compact all indexes (not including unfinished indexes)
- IndexCatalog::IndexIterator ii(_indexCatalog.getIndexIterator(txn, false));
+ IndexCatalog::IndexIterator ii(_indexCatalog.getIndexIterator(opCtx, false));
while (ii.more()) {
IndexDescriptor* descriptor = ii.next();
IndexAccessMethod* index = _indexCatalog.getIndex(descriptor);
LOG(1) << "compacting index: " << descriptor->toString();
- Status status = index->compact(txn);
+ Status status = index->compact(opCtx);
if (!status.isOK()) {
error() << "failed to compact index: " << descriptor->toString();
return status;
@@ -142,13 +142,13 @@ StatusWith<CompactStats> Collection::compact(OperationContext* txn,
return StatusWith<CompactStats>(stats);
}
- if (_indexCatalog.numIndexesInProgress(txn))
+ if (_indexCatalog.numIndexesInProgress(opCtx))
return StatusWith<CompactStats>(ErrorCodes::BadValue,
"cannot compact when indexes in progress");
vector<BSONObj> indexSpecs;
{
- IndexCatalog::IndexIterator ii(_indexCatalog.getIndexIterator(txn, false));
+ IndexCatalog::IndexIterator ii(_indexCatalog.getIndexIterator(opCtx, false));
while (ii.more()) {
IndexDescriptor* descriptor = ii.next();
@@ -170,14 +170,14 @@ StatusWith<CompactStats> Collection::compact(OperationContext* txn,
}
// Give a chance to be interrupted *before* we drop all indexes.
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
{
// note that the drop indexes call also invalidates all clientcursors for the namespace,
// which is important and wanted here
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
log() << "compact dropping indexes";
- Status status = _indexCatalog.dropAllIndexes(txn, true);
+ Status status = _indexCatalog.dropAllIndexes(opCtx, true);
if (!status.isOK()) {
return StatusWith<CompactStats>(status);
}
@@ -186,7 +186,7 @@ StatusWith<CompactStats> Collection::compact(OperationContext* txn,
CompactStats stats;
- MultiIndexBlock indexer(txn, this);
+ MultiIndexBlock indexer(opCtx, this);
indexer.allowInterruption();
indexer.ignoreUniqueConstraint(); // in compact we should be doing no checking
@@ -196,7 +196,7 @@ StatusWith<CompactStats> Collection::compact(OperationContext* txn,
MyCompactAdaptor adaptor(this, &indexer);
- status = _recordStore->compact(txn, &adaptor, compactOptions, &stats);
+ status = _recordStore->compact(opCtx, &adaptor, compactOptions, &stats);
if (!status.isOK())
return StatusWith<CompactStats>(status);
@@ -206,7 +206,7 @@ StatusWith<CompactStats> Collection::compact(OperationContext* txn,
return StatusWith<CompactStats>(status);
{
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
indexer.commit();
wunit.commit();
}
diff --git a/src/mongo/db/catalog/collection_info_cache.cpp b/src/mongo/db/catalog/collection_info_cache.cpp
index d5431bdb0eb..b601cbd948e 100644
--- a/src/mongo/db/catalog/collection_info_cache.cpp
+++ b/src/mongo/db/catalog/collection_info_cache.cpp
@@ -64,20 +64,20 @@ CollectionInfoCache::~CollectionInfoCache() {
}
}
-const UpdateIndexData& CollectionInfoCache::getIndexKeys(OperationContext* txn) const {
+const UpdateIndexData& CollectionInfoCache::getIndexKeys(OperationContext* opCtx) const {
// This requires "some" lock, and MODE_IS is an expression for that, for now.
- dassert(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_IS));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_IS));
invariant(_keysComputed);
return _indexedPaths;
}
-void CollectionInfoCache::computeIndexKeys(OperationContext* txn) {
+void CollectionInfoCache::computeIndexKeys(OperationContext* opCtx) {
_indexedPaths.clear();
bool hadTTLIndex = _hasTTLIndex;
_hasTTLIndex = false;
- IndexCatalog::IndexIterator i = _collection->getIndexCatalog()->getIndexIterator(txn, true);
+ IndexCatalog::IndexIterator i = _collection->getIndexCatalog()->getIndexIterator(opCtx, true);
while (i.more()) {
IndexDescriptor* descriptor = i.next();
@@ -140,13 +140,13 @@ void CollectionInfoCache::computeIndexKeys(OperationContext* txn) {
_keysComputed = true;
}
-void CollectionInfoCache::notifyOfQuery(OperationContext* txn,
+void CollectionInfoCache::notifyOfQuery(OperationContext* opCtx,
const std::set<std::string>& indexesUsed) {
// Record indexes used to fulfill query.
for (auto it = indexesUsed.begin(); it != indexesUsed.end(); ++it) {
// This index should still exist, since the PlanExecutor would have been killed if the
// index was dropped (and we would not get here).
- dassert(NULL != _collection->getIndexCatalog()->findIndexByName(txn, *it));
+ dassert(NULL != _collection->getIndexCatalog()->findIndexByName(opCtx, *it));
_indexUsageTracker.recordIndexAccess(*it);
}
@@ -167,21 +167,21 @@ QuerySettings* CollectionInfoCache::getQuerySettings() const {
return _querySettings.get();
}
-void CollectionInfoCache::updatePlanCacheIndexEntries(OperationContext* txn) {
+void CollectionInfoCache::updatePlanCacheIndexEntries(OperationContext* opCtx) {
std::vector<IndexEntry> indexEntries;
// TODO We shouldn't need to include unfinished indexes, but we must here because the index
// catalog may be in an inconsistent state. SERVER-18346.
const bool includeUnfinishedIndexes = true;
IndexCatalog::IndexIterator ii =
- _collection->getIndexCatalog()->getIndexIterator(txn, includeUnfinishedIndexes);
+ _collection->getIndexCatalog()->getIndexIterator(opCtx, includeUnfinishedIndexes);
while (ii.more()) {
const IndexDescriptor* desc = ii.next();
const IndexCatalogEntry* ice = ii.catalogEntry(desc);
indexEntries.emplace_back(desc->keyPattern(),
desc->getAccessMethodName(),
- desc->isMultikey(txn),
- ice->getMultikeyPaths(txn),
+ desc->isMultikey(opCtx),
+ ice->getMultikeyPaths(opCtx),
desc->isSparse(),
desc->unique(),
desc->indexName(),
@@ -193,45 +193,45 @@ void CollectionInfoCache::updatePlanCacheIndexEntries(OperationContext* txn) {
_planCache->notifyOfIndexEntries(indexEntries);
}
-void CollectionInfoCache::init(OperationContext* txn) {
+void CollectionInfoCache::init(OperationContext* opCtx) {
// Requires exclusive collection lock.
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
const bool includeUnfinishedIndexes = false;
IndexCatalog::IndexIterator ii =
- _collection->getIndexCatalog()->getIndexIterator(txn, includeUnfinishedIndexes);
+ _collection->getIndexCatalog()->getIndexIterator(opCtx, includeUnfinishedIndexes);
while (ii.more()) {
const IndexDescriptor* desc = ii.next();
_indexUsageTracker.registerIndex(desc->indexName(), desc->keyPattern());
}
- rebuildIndexData(txn);
+ rebuildIndexData(opCtx);
}
-void CollectionInfoCache::addedIndex(OperationContext* txn, const IndexDescriptor* desc) {
+void CollectionInfoCache::addedIndex(OperationContext* opCtx, const IndexDescriptor* desc) {
// Requires exclusive collection lock.
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
invariant(desc);
- rebuildIndexData(txn);
+ rebuildIndexData(opCtx);
_indexUsageTracker.registerIndex(desc->indexName(), desc->keyPattern());
}
-void CollectionInfoCache::droppedIndex(OperationContext* txn, StringData indexName) {
+void CollectionInfoCache::droppedIndex(OperationContext* opCtx, StringData indexName) {
// Requires exclusive collection lock.
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
- rebuildIndexData(txn);
+ rebuildIndexData(opCtx);
_indexUsageTracker.unregisterIndex(indexName);
}
-void CollectionInfoCache::rebuildIndexData(OperationContext* txn) {
+void CollectionInfoCache::rebuildIndexData(OperationContext* opCtx) {
clearQueryCache();
_keysComputed = false;
- computeIndexKeys(txn);
- updatePlanCacheIndexEntries(txn);
+ computeIndexKeys(opCtx);
+ updatePlanCacheIndexEntries(opCtx);
}
CollectionIndexUsageMap CollectionInfoCache::getIndexUsageStats() const {
diff --git a/src/mongo/db/catalog/collection_info_cache.h b/src/mongo/db/catalog/collection_info_cache.h
index ea832633cd3..cfe783748d1 100644
--- a/src/mongo/db/catalog/collection_info_cache.h
+++ b/src/mongo/db/catalog/collection_info_cache.h
@@ -64,7 +64,7 @@ public:
/* get set of index keys for this namespace. handy to quickly check if a given
field is indexed (Note it might be a secondary component of a compound index.)
*/
- const UpdateIndexData& getIndexKeys(OperationContext* txn) const;
+ const UpdateIndexData& getIndexKeys(OperationContext* opCtx) const;
/**
* Returns cached index usage statistics for this collection. The map returned will contain
@@ -78,7 +78,7 @@ public:
/**
* Builds internal cache state based on the current state of the Collection's IndexCatalog
*/
- void init(OperationContext* txn);
+ void init(OperationContext* opCtx);
/**
* Register a newly-created index with the cache. Must be called whenever an index is
@@ -86,7 +86,7 @@ public:
*
* Must be called under exclusive collection lock.
*/
- void addedIndex(OperationContext* txn, const IndexDescriptor* desc);
+ void addedIndex(OperationContext* opCtx, const IndexDescriptor* desc);
/**
* Deregister a newly-dropped index with the cache. Must be called whenever an index is
@@ -94,7 +94,7 @@ public:
*
* Must be called under exclusive collection lock.
*/
- void droppedIndex(OperationContext* txn, StringData indexName);
+ void droppedIndex(OperationContext* opCtx, StringData indexName);
/**
* Removes all cached query plans.
@@ -105,7 +105,7 @@ public:
* Signal to the cache that a query operation has completed. 'indexesUsed' should list the
* set of indexes used by the winning plan, if any.
*/
- void notifyOfQuery(OperationContext* txn, const std::set<std::string>& indexesUsed);
+ void notifyOfQuery(OperationContext* opCtx, const std::set<std::string>& indexesUsed);
private:
Collection* _collection; // not owned
@@ -124,14 +124,14 @@ private:
// Tracks index usage statistics for this collection.
CollectionIndexUsageTracker _indexUsageTracker;
- void computeIndexKeys(OperationContext* txn);
- void updatePlanCacheIndexEntries(OperationContext* txn);
+ void computeIndexKeys(OperationContext* opCtx);
+ void updatePlanCacheIndexEntries(OperationContext* opCtx);
/**
* Rebuilds cached information that is dependent on index composition. Must be called
* when index composition changes.
*/
- void rebuildIndexData(OperationContext* txn);
+ void rebuildIndexData(OperationContext* opCtx);
bool _hasTTLIndex = false;
};
diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp
index abb1e1d8c16..2007aba76d6 100644
--- a/src/mongo/db/catalog/create_collection.cpp
+++ b/src/mongo/db/catalog/create_collection.cpp
@@ -40,7 +40,7 @@
#include "mongo/db/repl/replication_coordinator_global.h"
namespace mongo {
-Status createCollection(OperationContext* txn,
+Status createCollection(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& cmdObj,
const BSONObj& idIndex) {
@@ -73,27 +73,27 @@ Status createCollection(OperationContext* txn,
options.hasField("$nExtents"));
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), dbName, MODE_X);
- OldClientContext ctx(txn, nss.ns());
- if (txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, nss)) {
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dbXLock(opCtx->lockState(), dbName, MODE_X);
+ OldClientContext ctx(opCtx, nss.ns());
+ if (opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, nss)) {
return Status(ErrorCodes::NotMaster,
str::stream() << "Not primary while creating collection " << nss.ns());
}
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
// Create collection.
const bool createDefaultIndexes = true;
- status = userCreateNS(txn, ctx.db(), nss.ns(), options, createDefaultIndexes, idIndex);
+ status = userCreateNS(opCtx, ctx.db(), nss.ns(), options, createDefaultIndexes, idIndex);
if (!status.isOK()) {
return status;
}
wunit.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "create", nss.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "create", nss.ns());
return Status::OK();
}
} // namespace mongo
diff --git a/src/mongo/db/catalog/create_collection.h b/src/mongo/db/catalog/create_collection.h
index 5f503a692a2..73dd82bebd8 100644
--- a/src/mongo/db/catalog/create_collection.h
+++ b/src/mongo/db/catalog/create_collection.h
@@ -40,7 +40,7 @@ class OperationContext;
* _id index according to 'idIndex', if it is non-empty. When 'idIndex' is empty, creates the
* default _id index.
*/
-Status createCollection(OperationContext* txn,
+Status createCollection(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& cmdObj,
const BSONObj& idIndex = BSONObj());
diff --git a/src/mongo/db/catalog/cursor_manager.cpp b/src/mongo/db/catalog/cursor_manager.cpp
index f6f1cf21474..2c9f9b2d9d8 100644
--- a/src/mongo/db/catalog/cursor_manager.cpp
+++ b/src/mongo/db/catalog/cursor_manager.cpp
@@ -107,11 +107,11 @@ public:
/**
* works globally
*/
- bool eraseCursor(OperationContext* txn, CursorId id, bool checkAuth);
+ bool eraseCursor(OperationContext* opCtx, CursorId id, bool checkAuth);
void appendStats(BSONObjBuilder& builder);
- std::size_t timeoutCursors(OperationContext* txn, int millisSinceLastCall);
+ std::size_t timeoutCursors(OperationContext* opCtx, int millisSinceLastCall);
int64_t nextSeed();
@@ -178,7 +178,7 @@ void GlobalCursorIdCache::destroyed(unsigned id, const std::string& ns) {
_idToNS.erase(id);
}
-bool GlobalCursorIdCache::eraseCursor(OperationContext* txn, CursorId id, bool checkAuth) {
+bool GlobalCursorIdCache::eraseCursor(OperationContext* opCtx, CursorId id, bool checkAuth) {
// Figure out what the namespace of this cursor is.
std::string ns;
if (globalCursorManager->ownsCursorId(id)) {
@@ -206,17 +206,17 @@ bool GlobalCursorIdCache::eraseCursor(OperationContext* txn, CursorId id, bool c
// Check if we are authorized to erase this cursor.
if (checkAuth) {
- AuthorizationSession* as = AuthorizationSession::get(txn->getClient());
+ AuthorizationSession* as = AuthorizationSession::get(opCtx->getClient());
Status authorizationStatus = as->checkAuthForKillCursors(nss, id);
if (!authorizationStatus.isOK()) {
- audit::logKillCursorsAuthzCheck(txn->getClient(), nss, id, ErrorCodes::Unauthorized);
+ audit::logKillCursorsAuthzCheck(opCtx->getClient(), nss, id, ErrorCodes::Unauthorized);
return false;
}
}
// If this cursor is owned by the global cursor manager, ask it to erase the cursor for us.
if (globalCursorManager->ownsCursorId(id)) {
- Status eraseStatus = globalCursorManager->eraseCursor(txn, id, checkAuth);
+ Status eraseStatus = globalCursorManager->eraseCursor(opCtx, id, checkAuth);
massert(28697,
eraseStatus.reason(),
eraseStatus.code() == ErrorCodes::OK ||
@@ -226,15 +226,16 @@ bool GlobalCursorIdCache::eraseCursor(OperationContext* txn, CursorId id, bool c
// If not, then the cursor must be owned by a collection. Erase the cursor under the
// collection lock (to prevent the collection from going away during the erase).
- AutoGetCollectionForRead ctx(txn, nss);
+ AutoGetCollectionForRead ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
if (!collection) {
if (checkAuth)
- audit::logKillCursorsAuthzCheck(txn->getClient(), nss, id, ErrorCodes::CursorNotFound);
+ audit::logKillCursorsAuthzCheck(
+ opCtx->getClient(), nss, id, ErrorCodes::CursorNotFound);
return false;
}
- Status eraseStatus = collection->getCursorManager()->eraseCursor(txn, id, checkAuth);
+ Status eraseStatus = collection->getCursorManager()->eraseCursor(opCtx, id, checkAuth);
massert(16089,
eraseStatus.reason(),
eraseStatus.code() == ErrorCodes::OK ||
@@ -242,7 +243,7 @@ bool GlobalCursorIdCache::eraseCursor(OperationContext* txn, CursorId id, bool c
return eraseStatus.isOK();
}
-std::size_t GlobalCursorIdCache::timeoutCursors(OperationContext* txn, int millisSinceLastCall) {
+std::size_t GlobalCursorIdCache::timeoutCursors(OperationContext* opCtx, int millisSinceLastCall) {
size_t totalTimedOut = 0;
// Time out the cursors from the global cursor manager.
@@ -265,7 +266,7 @@ std::size_t GlobalCursorIdCache::timeoutCursors(OperationContext* txn, int milli
// For each collection, time out its cursors under the collection lock (to prevent the
// collection from going away during the erase).
for (unsigned i = 0; i < todo.size(); i++) {
- AutoGetCollectionOrViewForRead ctx(txn, NamespaceString(todo[i]));
+ AutoGetCollectionOrViewForRead ctx(opCtx, NamespaceString(todo[i]));
if (!ctx.getDb()) {
continue;
}
@@ -287,26 +288,26 @@ CursorManager* CursorManager::getGlobalCursorManager() {
return globalCursorManager.get();
}
-std::size_t CursorManager::timeoutCursorsGlobal(OperationContext* txn, int millisSinceLastCall) {
- return globalCursorIdCache->timeoutCursors(txn, millisSinceLastCall);
+std::size_t CursorManager::timeoutCursorsGlobal(OperationContext* opCtx, int millisSinceLastCall) {
+ return globalCursorIdCache->timeoutCursors(opCtx, millisSinceLastCall);
}
-int CursorManager::eraseCursorGlobalIfAuthorized(OperationContext* txn, int n, const char* _ids) {
+int CursorManager::eraseCursorGlobalIfAuthorized(OperationContext* opCtx, int n, const char* _ids) {
ConstDataCursor ids(_ids);
int numDeleted = 0;
for (int i = 0; i < n; i++) {
- if (eraseCursorGlobalIfAuthorized(txn, ids.readAndAdvance<LittleEndian<int64_t>>()))
+ if (eraseCursorGlobalIfAuthorized(opCtx, ids.readAndAdvance<LittleEndian<int64_t>>()))
numDeleted++;
if (globalInShutdownDeprecated())
break;
}
return numDeleted;
}
-bool CursorManager::eraseCursorGlobalIfAuthorized(OperationContext* txn, CursorId id) {
- return globalCursorIdCache->eraseCursor(txn, id, true);
+bool CursorManager::eraseCursorGlobalIfAuthorized(OperationContext* opCtx, CursorId id) {
+ return globalCursorIdCache->eraseCursor(opCtx, id, true);
}
-bool CursorManager::eraseCursorGlobal(OperationContext* txn, CursorId id) {
- return globalCursorIdCache->eraseCursor(txn, id, false);
+bool CursorManager::eraseCursorGlobal(OperationContext* opCtx, CursorId id) {
+ return globalCursorIdCache->eraseCursor(opCtx, id, false);
}
@@ -397,7 +398,7 @@ void CursorManager::invalidateAll(bool collectionGoingAway, const std::string& r
}
}
-void CursorManager::invalidateDocument(OperationContext* txn,
+void CursorManager::invalidateDocument(OperationContext* opCtx,
const RecordId& dl,
InvalidationType type) {
if (supportsDocLocking()) {
@@ -411,13 +412,13 @@ void CursorManager::invalidateDocument(OperationContext* txn,
for (ExecSet::iterator it = _nonCachedExecutors.begin(); it != _nonCachedExecutors.end();
++it) {
PlanExecutor* exec = *it;
- exec->invalidate(txn, dl, type);
+ exec->invalidate(opCtx, dl, type);
}
for (CursorMap::const_iterator i = _cursors.begin(); i != _cursors.end(); ++i) {
PlanExecutor* exec = i->second->getExecutor();
if (exec) {
- exec->invalidate(txn, dl, type);
+ exec->invalidate(opCtx, dl, type);
}
}
}
@@ -543,7 +544,7 @@ void CursorManager::deregisterCursor(ClientCursor* cc) {
_deregisterCursor_inlock(cc);
}
-Status CursorManager::eraseCursor(OperationContext* txn, CursorId id, bool shouldAudit) {
+Status CursorManager::eraseCursor(OperationContext* opCtx, CursorId id, bool shouldAudit) {
ClientCursor* cursor;
{
@@ -553,7 +554,7 @@ Status CursorManager::eraseCursor(OperationContext* txn, CursorId id, bool shoul
if (it == _cursors.end()) {
if (shouldAudit) {
audit::logKillCursorsAuthzCheck(
- txn->getClient(), _nss, id, ErrorCodes::CursorNotFound);
+ opCtx->getClient(), _nss, id, ErrorCodes::CursorNotFound);
}
return {ErrorCodes::CursorNotFound, str::stream() << "Cursor id not found: " << id};
}
@@ -563,14 +564,14 @@ Status CursorManager::eraseCursor(OperationContext* txn, CursorId id, bool shoul
if (cursor->_isPinned) {
if (shouldAudit) {
audit::logKillCursorsAuthzCheck(
- txn->getClient(), _nss, id, ErrorCodes::OperationFailed);
+ opCtx->getClient(), _nss, id, ErrorCodes::OperationFailed);
}
return {ErrorCodes::OperationFailed,
str::stream() << "Cannot kill pinned cursor: " << id};
}
if (shouldAudit) {
- audit::logKillCursorsAuthzCheck(txn->getClient(), _nss, id, ErrorCodes::OK);
+ audit::logKillCursorsAuthzCheck(opCtx->getClient(), _nss, id, ErrorCodes::OK);
}
cursor->kill();
diff --git a/src/mongo/db/catalog/cursor_manager.h b/src/mongo/db/catalog/cursor_manager.h
index 36c861f7a5c..ad4289d4f38 100644
--- a/src/mongo/db/catalog/cursor_manager.h
+++ b/src/mongo/db/catalog/cursor_manager.h
@@ -99,7 +99,7 @@ public:
* Broadcast a document invalidation to all relevant PlanExecutor(s). invalidateDocument
* must be called *before* the provided RecordId is about to be deleted or mutated.
*/
- void invalidateDocument(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ void invalidateDocument(OperationContext* opCtx, const RecordId& dl, InvalidationType type);
/**
* Destroys cursors that have been inactive for too long.
@@ -151,7 +151,7 @@ public:
*
* If 'shouldAudit' is true, will perform audit logging.
*/
- Status eraseCursor(OperationContext* txn, CursorId id, bool shouldAudit);
+ Status eraseCursor(OperationContext* opCtx, CursorId id, bool shouldAudit);
/**
* Returns true if the space of cursor ids that cursor manager is responsible for includes
@@ -172,17 +172,17 @@ public:
static CursorManager* getGlobalCursorManager();
- static int eraseCursorGlobalIfAuthorized(OperationContext* txn, int n, const char* ids);
+ static int eraseCursorGlobalIfAuthorized(OperationContext* opCtx, int n, const char* ids);
- static bool eraseCursorGlobalIfAuthorized(OperationContext* txn, CursorId id);
+ static bool eraseCursorGlobalIfAuthorized(OperationContext* opCtx, CursorId id);
- static bool eraseCursorGlobal(OperationContext* txn, CursorId id);
+ static bool eraseCursorGlobal(OperationContext* opCtx, CursorId id);
/**
* Deletes inactive cursors from the global cursor manager and from all per-collection cursor
* managers. Returns the number of cursors that were timed out.
*/
- static std::size_t timeoutCursorsGlobal(OperationContext* txn, int millisSinceLastCall);
+ static std::size_t timeoutCursorsGlobal(OperationContext* opCtx, int millisSinceLastCall);
private:
friend class ClientCursorPin;
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index d9abaef39ca..b6c11f4c51f 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -85,8 +85,8 @@ void massertNamespaceNotIndex(StringData ns, StringData caller) {
class Database::AddCollectionChange : public RecoveryUnit::Change {
public:
- AddCollectionChange(OperationContext* txn, Database* db, StringData ns)
- : _txn(txn), _db(db), _ns(ns.toString()) {}
+ AddCollectionChange(OperationContext* opCtx, Database* db, StringData ns)
+ : _opCtx(opCtx), _db(db), _ns(ns.toString()) {}
virtual void commit() {
CollectionMap::const_iterator it = _db->_collections.find(_ns);
@@ -94,8 +94,8 @@ public:
return;
// Ban reading from this collection on committed reads on snapshots before now.
- auto replCoord = repl::ReplicationCoordinator::get(_txn);
- auto snapshotName = replCoord->reserveSnapshotName(_txn);
+ auto replCoord = repl::ReplicationCoordinator::get(_opCtx);
+ auto snapshotName = replCoord->reserveSnapshotName(_opCtx);
replCoord->forceSnapshotCreation(); // Ensures a newer snapshot gets created even if idle.
it->second->setMinimumVisibleSnapshot(snapshotName);
}
@@ -109,7 +109,7 @@ public:
_db->_collections.erase(it);
}
- OperationContext* const _txn;
+ OperationContext* const _opCtx;
Database* const _db;
const std::string _ns;
};
@@ -138,11 +138,11 @@ Database::~Database() {
delete i->second;
}
-void Database::close(OperationContext* txn) {
+void Database::close(OperationContext* opCtx) {
// XXX? - Do we need to close database under global lock or just DB-lock is sufficient ?
- invariant(txn->lockState()->isW());
+ invariant(opCtx->lockState()->isW());
// oplog caches some things, dirty its caches
- repl::oplogCheckCloseDatabase(txn, this);
+ repl::oplogCheckCloseDatabase(opCtx, this);
if (BackgroundOperation::inProgForDb(_name)) {
log() << "warning: bg op in prog during close db? " << _name;
@@ -181,7 +181,7 @@ Status Database::validateDBName(StringData dbname) {
return Status::OK();
}
-Collection* Database::_getOrCreateCollectionInstance(OperationContext* txn, StringData fullns) {
+Collection* Database::_getOrCreateCollectionInstance(OperationContext* opCtx, StringData fullns) {
Collection* collection = getCollection(fullns);
if (collection) {
return collection;
@@ -194,11 +194,11 @@ Collection* Database::_getOrCreateCollectionInstance(OperationContext* txn, Stri
invariant(rs.get()); // if cce exists, so should this
// Not registering AddCollectionChange since this is for collections that already exist.
- Collection* c = new Collection(txn, fullns, cce.release(), rs.release(), _dbEntry);
+ Collection* c = new Collection(opCtx, fullns, cce.release(), rs.release(), _dbEntry);
return c;
}
-Database::Database(OperationContext* txn, StringData name, DatabaseCatalogEntry* dbEntry)
+Database::Database(OperationContext* opCtx, StringData name, DatabaseCatalogEntry* dbEntry)
: _name(name.toString()),
_dbEntry(dbEntry),
_profileName(_name + ".system.profile"),
@@ -218,14 +218,14 @@ Database::Database(OperationContext* txn, StringData name, DatabaseCatalogEntry*
_dbEntry->getCollectionNamespaces(&collections);
for (list<string>::const_iterator it = collections.begin(); it != collections.end(); ++it) {
const string ns = *it;
- _collections[ns] = _getOrCreateCollectionInstance(txn, ns);
+ _collections[ns] = _getOrCreateCollectionInstance(opCtx, ns);
}
// At construction time of the viewCatalog, the _collections map wasn't initialized yet, so no
// system.views collection would be found. Now we're sufficiently initialized, signal a version
// change. Also force a reload, so if there are problems with the catalog contents as might be
// caused by incorrect mongod versions or similar, they are found right away.
_views.invalidate();
- Status reloadStatus = _views.reloadIfNeeded(txn);
+ Status reloadStatus = _views.reloadIfNeeded(opCtx);
if (!reloadStatus.isOK()) {
warning() << "Unable to parse views: " << redact(reloadStatus)
<< "; remove any invalid views from the " << _viewsName
@@ -233,8 +233,8 @@ Database::Database(OperationContext* txn, StringData name, DatabaseCatalogEntry*
}
}
-void Database::clearTmpCollections(OperationContext* txn) {
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
+void Database::clearTmpCollections(OperationContext* opCtx) {
+ invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
list<string> collections;
_dbEntry->getCollectionNamespaces(&collections);
@@ -245,12 +245,12 @@ void Database::clearTmpCollections(OperationContext* txn) {
CollectionCatalogEntry* coll = _dbEntry->getCollectionCatalogEntry(ns);
- CollectionOptions options = coll->getCollectionOptions(txn);
+ CollectionOptions options = coll->getCollectionOptions(opCtx);
if (!options.temp)
continue;
try {
- WriteUnitOfWork wunit(txn);
- Status status = dropCollection(txn, ns);
+ WriteUnitOfWork wunit(opCtx);
+ Status status = dropCollection(opCtx, ns);
if (!status.isOK()) {
warning() << "could not drop temp collection '" << ns << "': " << redact(status);
continue;
@@ -260,12 +260,12 @@ void Database::clearTmpCollections(OperationContext* txn) {
} catch (const WriteConflictException& exp) {
warning() << "could not drop temp collection '" << ns << "' due to "
"WriteConflictException";
- txn->recoveryUnit()->abandonSnapshot();
+ opCtx->recoveryUnit()->abandonSnapshot();
}
}
}
-Status Database::setProfilingLevel(OperationContext* txn, int newLevel) {
+Status Database::setProfilingLevel(OperationContext* opCtx, int newLevel) {
if (_profile == newLevel) {
return Status::OK();
}
@@ -279,7 +279,7 @@ Status Database::setProfilingLevel(OperationContext* txn, int newLevel) {
return Status(ErrorCodes::BadValue, "profiling level has to be >=0 and <= 2");
}
- Status status = createProfileCollection(txn, this);
+ Status status = createProfileCollection(opCtx, this);
if (!status.isOK()) {
return status;
}
@@ -336,13 +336,13 @@ void Database::getStats(OperationContext* opCtx, BSONObjBuilder* output, double
_dbEntry->appendExtraStats(opCtx, output, scale);
}
-Status Database::dropView(OperationContext* txn, StringData fullns) {
- Status status = _views.dropView(txn, NamespaceString(fullns));
- Top::get(txn->getClient()->getServiceContext()).collectionDropped(fullns);
+Status Database::dropView(OperationContext* opCtx, StringData fullns) {
+ Status status = _views.dropView(opCtx, NamespaceString(fullns));
+ Top::get(opCtx->getClient()->getServiceContext()).collectionDropped(fullns);
return status;
}
-Status Database::dropCollection(OperationContext* txn, StringData fullns) {
+Status Database::dropCollection(OperationContext* opCtx, StringData fullns) {
if (!getCollection(fullns)) {
// Collection doesn't exist so don't bother validating if it can be dropped.
return Status::OK();
@@ -364,11 +364,12 @@ Status Database::dropCollection(OperationContext* txn, StringData fullns) {
}
}
- return dropCollectionEvenIfSystem(txn, nss);
+ return dropCollectionEvenIfSystem(opCtx, nss);
}
-Status Database::dropCollectionEvenIfSystem(OperationContext* txn, const NamespaceString& fullns) {
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
+Status Database::dropCollectionEvenIfSystem(OperationContext* opCtx,
+ const NamespaceString& fullns) {
+ invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
LOG(1) << "dropCollection: " << fullns;
@@ -383,23 +384,23 @@ Status Database::dropCollectionEvenIfSystem(OperationContext* txn, const Namespa
audit::logDropCollection(&cc(), fullns.toString());
- Status s = collection->getIndexCatalog()->dropAllIndexes(txn, true);
+ Status s = collection->getIndexCatalog()->dropAllIndexes(opCtx, true);
if (!s.isOK()) {
warning() << "could not drop collection, trying to drop indexes" << fullns << " because of "
<< redact(s.toString());
return s;
}
- verify(collection->_details->getTotalIndexCount(txn) == 0);
+ verify(collection->_details->getTotalIndexCount(opCtx) == 0);
LOG(1) << "\t dropIndexes done";
- Top::get(txn->getClient()->getServiceContext()).collectionDropped(fullns.toString());
+ Top::get(opCtx->getClient()->getServiceContext()).collectionDropped(fullns.toString());
// We want to destroy the Collection object before telling the StorageEngine to destroy the
// RecordStore.
- _clearCollectionCache(txn, fullns.toString(), "collection dropped");
+ _clearCollectionCache(opCtx, fullns.toString(), "collection dropped");
- s = _dbEntry->dropCollection(txn, fullns.toString());
+ s = _dbEntry->dropCollection(opCtx, fullns.toString());
if (!s.isOK())
return s;
@@ -416,12 +417,12 @@ Status Database::dropCollectionEvenIfSystem(OperationContext* txn, const Namespa
}
}
- getGlobalServiceContext()->getOpObserver()->onDropCollection(txn, fullns);
+ getGlobalServiceContext()->getOpObserver()->onDropCollection(opCtx, fullns);
return Status::OK();
}
-void Database::_clearCollectionCache(OperationContext* txn,
+void Database::_clearCollectionCache(OperationContext* opCtx,
StringData fullns,
const std::string& reason) {
verify(_name == nsToDatabaseSubstring(fullns));
@@ -430,7 +431,7 @@ void Database::_clearCollectionCache(OperationContext* txn,
return;
// Takes ownership of the collection
- txn->recoveryUnit()->registerChange(new RemoveCollectionChange(this, it->second));
+ opCtx->recoveryUnit()->registerChange(new RemoveCollectionChange(this, it->second));
it->second->_cursorManager.invalidateAll(false, reason);
_collections.erase(it);
@@ -446,12 +447,12 @@ Collection* Database::getCollection(StringData ns) const {
return NULL;
}
-Status Database::renameCollection(OperationContext* txn,
+Status Database::renameCollection(OperationContext* opCtx,
StringData fromNS,
StringData toNS,
bool stayTemp) {
audit::logRenameCollection(&cc(), fromNS, toNS);
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
BackgroundOperation::assertNoBgOpInProgForNs(fromNS);
BackgroundOperation::assertNoBgOpInProgForNs(toNS);
@@ -462,28 +463,28 @@ Status Database::renameCollection(OperationContext* txn,
string clearCacheReason = str::stream() << "renamed collection '" << fromNS << "' to '"
<< toNS << "'";
- IndexCatalog::IndexIterator ii = coll->getIndexCatalog()->getIndexIterator(txn, true);
+ IndexCatalog::IndexIterator ii = coll->getIndexCatalog()->getIndexIterator(opCtx, true);
while (ii.more()) {
IndexDescriptor* desc = ii.next();
- _clearCollectionCache(txn, desc->indexNamespace(), clearCacheReason);
+ _clearCollectionCache(opCtx, desc->indexNamespace(), clearCacheReason);
}
- _clearCollectionCache(txn, fromNS, clearCacheReason);
- _clearCollectionCache(txn, toNS, clearCacheReason);
+ _clearCollectionCache(opCtx, fromNS, clearCacheReason);
+ _clearCollectionCache(opCtx, toNS, clearCacheReason);
- Top::get(txn->getClient()->getServiceContext()).collectionDropped(fromNS.toString());
+ Top::get(opCtx->getClient()->getServiceContext()).collectionDropped(fromNS.toString());
}
- txn->recoveryUnit()->registerChange(new AddCollectionChange(txn, this, toNS));
- Status s = _dbEntry->renameCollection(txn, fromNS, toNS, stayTemp);
- _collections[toNS] = _getOrCreateCollectionInstance(txn, toNS);
+ opCtx->recoveryUnit()->registerChange(new AddCollectionChange(opCtx, this, toNS));
+ Status s = _dbEntry->renameCollection(opCtx, fromNS, toNS, stayTemp);
+ _collections[toNS] = _getOrCreateCollectionInstance(opCtx, toNS);
return s;
}
-Collection* Database::getOrCreateCollection(OperationContext* txn, StringData ns) {
+Collection* Database::getOrCreateCollection(OperationContext* opCtx, StringData ns) {
Collection* c = getCollection(ns);
if (!c) {
- c = createCollection(txn, ns);
+ c = createCollection(opCtx, ns);
}
return c;
}
@@ -509,10 +510,10 @@ void Database::_checkCanCreateCollection(const NamespaceString& nss,
uassert(28838, "cannot create a non-capped oplog collection", options.capped || !nss.isOplog());
}
-Status Database::createView(OperationContext* txn,
+Status Database::createView(OperationContext* opCtx,
StringData ns,
const CollectionOptions& options) {
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
invariant(options.isView());
NamespaceString nss(ns);
@@ -524,27 +525,27 @@ Status Database::createView(OperationContext* txn,
return Status(ErrorCodes::InvalidNamespace,
str::stream() << "invalid namespace name for a view: " + nss.toString());
- return _views.createView(txn, nss, viewOnNss, BSONArray(options.pipeline), options.collation);
+ return _views.createView(opCtx, nss, viewOnNss, BSONArray(options.pipeline), options.collation);
}
-Collection* Database::createCollection(OperationContext* txn,
+Collection* Database::createCollection(OperationContext* opCtx,
StringData ns,
const CollectionOptions& options,
bool createIdIndex,
const BSONObj& idIndex) {
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
invariant(!options.isView());
NamespaceString nss(ns);
_checkCanCreateCollection(nss, options);
audit::logCreateCollection(&cc(), ns);
- Status status = _dbEntry->createCollection(txn, ns, options, true /*allocateDefaultSpace*/);
+ Status status = _dbEntry->createCollection(opCtx, ns, options, true /*allocateDefaultSpace*/);
massertNoTraceStatusOK(status);
- txn->recoveryUnit()->registerChange(new AddCollectionChange(txn, this, ns));
- Collection* collection = _getOrCreateCollectionInstance(txn, ns);
+ opCtx->recoveryUnit()->registerChange(new AddCollectionChange(opCtx, this, ns));
+ Collection* collection = _getOrCreateCollectionInstance(opCtx, ns);
invariant(collection);
_collections[ns] = collection;
@@ -558,19 +559,19 @@ Collection* Database::createCollection(OperationContext* txn,
serverGlobalParams.featureCompatibility.version.load();
IndexCatalog* ic = collection->getIndexCatalog();
fullIdIndexSpec = uassertStatusOK(ic->createIndexOnEmptyCollection(
- txn,
+ opCtx,
!idIndex.isEmpty() ? idIndex
: ic->getDefaultIdIndexSpec(featureCompatibilityVersion)));
}
}
if (nss.isSystem()) {
- authindex::createSystemIndexes(txn, collection);
+ authindex::createSystemIndexes(opCtx, collection);
}
}
getGlobalServiceContext()->getOpObserver()->onCreateCollection(
- txn, nss, options, fullIdIndexSpec);
+ opCtx, nss, options, fullIdIndexSpec);
return collection;
}
@@ -579,9 +580,9 @@ const DatabaseCatalogEntry* Database::getDatabaseCatalogEntry() const {
return _dbEntry;
}
-void dropAllDatabasesExceptLocal(OperationContext* txn) {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
+void dropAllDatabasesExceptLocal(OperationContext* opCtx) {
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
vector<string> n;
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
@@ -595,47 +596,47 @@ void dropAllDatabasesExceptLocal(OperationContext* txn) {
for (vector<string>::iterator i = n.begin(); i != n.end(); i++) {
if (*i != "local") {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- Database* db = dbHolder().get(txn, *i);
+ Database* db = dbHolder().get(opCtx, *i);
// This is needed since dropDatabase can't be rolled back.
- // This is safe be replaced by "invariant(db);dropDatabase(txn, db);" once fixed
+ // This is safe to be replaced by "invariant(db);dropDatabase(opCtx, db);" once fixed
if (db == nullptr) {
log() << "database disappeared after listDatabases but before drop: " << *i;
} else {
- Database::dropDatabase(txn, db);
+ Database::dropDatabase(opCtx, db);
}
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropAllDatabasesExceptLocal", *i);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "dropAllDatabasesExceptLocal", *i);
}
}
}
-void Database::dropDatabase(OperationContext* txn, Database* db) {
+void Database::dropDatabase(OperationContext* opCtx, Database* db) {
invariant(db);
// Store the name so we have if for after the db object is deleted
const string name = db->name();
LOG(1) << "dropDatabase " << name;
- invariant(txn->lockState()->isDbLockedForMode(name, MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(name, MODE_X));
BackgroundOperation::assertNoBgOpInProgForDb(name);
- audit::logDropDatabase(txn->getClient(), name);
+ audit::logDropDatabase(opCtx->getClient(), name);
for (auto&& coll : *db) {
- Top::get(txn->getClient()->getServiceContext()).collectionDropped(coll->ns().ns(), true);
+ Top::get(opCtx->getClient()->getServiceContext()).collectionDropped(coll->ns().ns(), true);
}
- dbHolder().close(txn, name);
+ dbHolder().close(opCtx, name);
db = NULL; // db is now deleted
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- getGlobalServiceContext()->getGlobalStorageEngine()->dropDatabase(txn, name);
+ getGlobalServiceContext()->getGlobalStorageEngine()->dropDatabase(opCtx, name);
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropDatabase", name);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "dropDatabase", name);
}
-Status userCreateNS(OperationContext* txn,
+Status userCreateNS(OperationContext* opCtx,
Database* db,
StringData ns,
BSONObj options,
@@ -654,7 +655,7 @@ Status userCreateNS(OperationContext* txn,
return Status(ErrorCodes::NamespaceExists,
str::stream() << "a collection '" << ns.toString() << "' already exists");
- if (db->getViewCatalog()->lookup(txn, ns))
+ if (db->getViewCatalog()->lookup(opCtx, ns))
return Status(ErrorCodes::NamespaceExists,
str::stream() << "a view '" << ns.toString() << "' already exists");
@@ -665,7 +666,7 @@ Status userCreateNS(OperationContext* txn,
// Validate the collation, if there is one.
if (!collectionOptions.collation.isEmpty()) {
- auto collator = CollatorFactoryInterface::get(txn->getServiceContext())
+ auto collator = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(collectionOptions.collation);
if (!collator.isOK()) {
return collator.getStatus();
@@ -703,9 +704,10 @@ Status userCreateNS(OperationContext* txn,
}
if (collectionOptions.isView()) {
- uassertStatusOK(db->createView(txn, ns, collectionOptions));
+ uassertStatusOK(db->createView(opCtx, ns, collectionOptions));
} else {
- invariant(db->createCollection(txn, ns, collectionOptions, createDefaultIndexes, idIndex));
+ invariant(
+ db->createCollection(opCtx, ns, collectionOptions, createDefaultIndexes, idIndex));
}
return Status::OK();
diff --git a/src/mongo/db/catalog/database.h b/src/mongo/db/catalog/database.h
index 2b405710b78..de8e65aa3ce 100644
--- a/src/mongo/db/catalog/database.h
+++ b/src/mongo/db/catalog/database.h
@@ -106,7 +106,7 @@ public:
CollectionMap::const_iterator _it;
};
- Database(OperationContext* txn, StringData name, DatabaseCatalogEntry* dbEntry);
+ Database(OperationContext* opCtx, StringData name, DatabaseCatalogEntry* dbEntry);
// must call close first
~Database();
@@ -120,21 +120,21 @@ public:
}
// closes files and other cleanup see below.
- void close(OperationContext* txn);
+ void close(OperationContext* opCtx);
const std::string& name() const {
return _name;
}
- void clearTmpCollections(OperationContext* txn);
+ void clearTmpCollections(OperationContext* opCtx);
/**
* Sets a new profiling level for the database and returns the outcome.
*
- * @param txn Operation context which to use for creating the profiling collection.
+ * @param opCtx Operation context which to use for creating the profiling collection.
* @param newLevel New profiling level to use.
*/
- Status setProfilingLevel(OperationContext* txn, int newLevel);
+ Status setProfilingLevel(OperationContext* opCtx, int newLevel);
int getProfilingLevel() const {
return _profile;
@@ -151,18 +151,20 @@ public:
* dropCollection() will refuse to drop system collections. Use dropCollectionEvenIfSystem() if
* that is required.
*/
- Status dropCollection(OperationContext* txn, StringData fullns);
- Status dropCollectionEvenIfSystem(OperationContext* txn, const NamespaceString& fullns);
+ Status dropCollection(OperationContext* opCtx, StringData fullns);
+ Status dropCollectionEvenIfSystem(OperationContext* opCtx, const NamespaceString& fullns);
- Status dropView(OperationContext* txn, StringData fullns);
+ Status dropView(OperationContext* opCtx, StringData fullns);
- Collection* createCollection(OperationContext* txn,
+ Collection* createCollection(OperationContext* opCtx,
StringData ns,
const CollectionOptions& options = CollectionOptions(),
bool createDefaultIndexes = true,
const BSONObj& idIndex = BSONObj());
- Status createView(OperationContext* txn, StringData viewName, const CollectionOptions& options);
+ Status createView(OperationContext* opCtx,
+ StringData viewName,
+ const CollectionOptions& options);
/**
* @param ns - this is fully qualified, which is maybe not ideal ???
@@ -181,9 +183,9 @@ public:
return &_views;
}
- Collection* getOrCreateCollection(OperationContext* txn, StringData ns);
+ Collection* getOrCreateCollection(OperationContext* opCtx, StringData ns);
- Status renameCollection(OperationContext* txn,
+ Status renameCollection(OperationContext* opCtx,
StringData fromNS,
StringData toNS,
bool stayTemp);
@@ -195,7 +197,7 @@ public:
*
* Must be called with the specified database locked in X mode.
*/
- static void dropDatabase(OperationContext* txn, Database* db);
+ static void dropDatabase(OperationContext* opCtx, Database* db);
static Status validateDBName(StringData dbname);
@@ -215,7 +217,7 @@ private:
* Note: This does not add the collection to _collections map, that must be done
* by the caller, who takes ownership of the Collection*
*/
- Collection* _getOrCreateCollectionInstance(OperationContext* txn, StringData fullns);
+ Collection* _getOrCreateCollectionInstance(OperationContext* opCtx, StringData fullns);
/**
* Throws if there is a reason 'ns' cannot be created as a user collection.
@@ -226,7 +228,9 @@ private:
* Deregisters and invalidates all cursors on collection 'fullns'. Callers must specify
* 'reason' for why the cache is being cleared.
*/
- void _clearCollectionCache(OperationContext* txn, StringData fullns, const std::string& reason);
+ void _clearCollectionCache(OperationContext* opCtx,
+ StringData fullns,
+ const std::string& reason);
class AddCollectionChange;
class RemoveCollectionChange;
@@ -251,7 +255,7 @@ private:
friend class IndexCatalog;
};
-void dropAllDatabasesExceptLocal(OperationContext* txn);
+void dropAllDatabasesExceptLocal(OperationContext* opCtx);
/**
* Creates the namespace 'ns' in the database 'db' according to 'options'. If 'createDefaultIndexes'
@@ -259,7 +263,7 @@ void dropAllDatabasesExceptLocal(OperationContext* txn);
* collections). Creates the collection's _id index according to 'idIndex', if it is non-empty. When
* 'idIndex' is empty, creates the default _id index.
*/
-Status userCreateNS(OperationContext* txn,
+Status userCreateNS(OperationContext* opCtx,
Database* db,
StringData ns,
BSONObj options,
diff --git a/src/mongo/db/catalog/database_catalog_entry.h b/src/mongo/db/catalog/database_catalog_entry.h
index 772871cacab..714e3537acf 100644
--- a/src/mongo/db/catalog/database_catalog_entry.h
+++ b/src/mongo/db/catalog/database_catalog_entry.h
@@ -96,16 +96,16 @@ public:
virtual RecordStore* getRecordStore(StringData ns) const = 0;
// Ownership passes to caller
- virtual IndexAccessMethod* getIndex(OperationContext* txn,
+ virtual IndexAccessMethod* getIndex(OperationContext* opCtx,
const CollectionCatalogEntry* collection,
IndexCatalogEntry* index) = 0;
- virtual Status createCollection(OperationContext* txn,
+ virtual Status createCollection(OperationContext* opCtx,
StringData ns,
const CollectionOptions& options,
bool allocateDefaultSpace) = 0;
- virtual Status renameCollection(OperationContext* txn,
+ virtual Status renameCollection(OperationContext* opCtx,
StringData fromNS,
StringData toNS,
bool stayTemp) = 0;
diff --git a/src/mongo/db/catalog/database_holder.cpp b/src/mongo/db/catalog/database_holder.cpp
index faa0e21a6d9..d3ee0e2e37f 100644
--- a/src/mongo/db/catalog/database_holder.cpp
+++ b/src/mongo/db/catalog/database_holder.cpp
@@ -81,9 +81,9 @@ DatabaseHolder& dbHolder() {
}
-Database* DatabaseHolder::get(OperationContext* txn, StringData ns) const {
+Database* DatabaseHolder::get(OperationContext* opCtx, StringData ns) const {
const StringData db = _todb(ns);
- invariant(txn->lockState()->isDbLockedForMode(db, MODE_IS));
+ invariant(opCtx->lockState()->isDbLockedForMode(db, MODE_IS));
stdx::lock_guard<SimpleMutex> lk(_m);
DBs::const_iterator it = _dbs.find(db);
@@ -110,9 +110,9 @@ std::set<std::string> DatabaseHolder::getNamesWithConflictingCasing(StringData n
return _getNamesWithConflictingCasing_inlock(name);
}
-Database* DatabaseHolder::openDb(OperationContext* txn, StringData ns, bool* justCreated) {
+Database* DatabaseHolder::openDb(OperationContext* opCtx, StringData ns, bool* justCreated) {
const StringData dbname = _todb(ns);
- invariant(txn->lockState()->isDbLockedForMode(dbname, MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(dbname, MODE_X));
if (justCreated)
*justCreated = false; // Until proven otherwise.
@@ -148,7 +148,7 @@ Database* DatabaseHolder::openDb(OperationContext* txn, StringData ns, bool* jus
// different databases for the same name.
lk.unlock();
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- DatabaseCatalogEntry* entry = storageEngine->getDatabaseCatalogEntry(txn, dbname);
+ DatabaseCatalogEntry* entry = storageEngine->getDatabaseCatalogEntry(opCtx, dbname);
if (!entry->exists()) {
audit::logCreateDatabase(&cc(), dbname);
@@ -156,7 +156,7 @@ Database* DatabaseHolder::openDb(OperationContext* txn, StringData ns, bool* jus
*justCreated = true;
}
- auto newDb = stdx::make_unique<Database>(txn, dbname, entry);
+ auto newDb = stdx::make_unique<Database>(opCtx, dbname, entry);
// Finally replace our nullptr entry with the new Database pointer.
removeDbGuard.Dismiss();
@@ -169,8 +169,8 @@ Database* DatabaseHolder::openDb(OperationContext* txn, StringData ns, bool* jus
return it->second;
}
-void DatabaseHolder::close(OperationContext* txn, StringData ns) {
- invariant(txn->lockState()->isW());
+void DatabaseHolder::close(OperationContext* opCtx, StringData ns) {
+ invariant(opCtx->lockState()->isW());
const StringData dbName = _todb(ns);
@@ -181,15 +181,15 @@ void DatabaseHolder::close(OperationContext* txn, StringData ns) {
return;
}
- it->second->close(txn);
+ it->second->close(opCtx);
delete it->second;
_dbs.erase(it);
- getGlobalServiceContext()->getGlobalStorageEngine()->closeDatabase(txn, dbName.toString());
+ getGlobalServiceContext()->getGlobalStorageEngine()->closeDatabase(opCtx, dbName.toString());
}
-bool DatabaseHolder::closeAll(OperationContext* txn, BSONObjBuilder& result, bool force) {
- invariant(txn->lockState()->isW());
+bool DatabaseHolder::closeAll(OperationContext* opCtx, BSONObjBuilder& result, bool force) {
+ invariant(opCtx->lockState()->isW());
stdx::lock_guard<SimpleMutex> lk(_m);
@@ -213,12 +213,12 @@ bool DatabaseHolder::closeAll(OperationContext* txn, BSONObjBuilder& result, boo
}
Database* db = _dbs[name];
- db->close(txn);
+ db->close(opCtx);
delete db;
_dbs.erase(name);
- getGlobalServiceContext()->getGlobalStorageEngine()->closeDatabase(txn, name);
+ getGlobalServiceContext()->getGlobalStorageEngine()->closeDatabase(opCtx, name);
bb.append(name);
}
diff --git a/src/mongo/db/catalog/database_holder.h b/src/mongo/db/catalog/database_holder.h
index 8881dfd9795..fd574832d6c 100644
--- a/src/mongo/db/catalog/database_holder.h
+++ b/src/mongo/db/catalog/database_holder.h
@@ -53,7 +53,7 @@ public:
* Retrieves an already opened database or returns NULL. Must be called with the database
* locked in at least IS-mode.
*/
- Database* get(OperationContext* txn, StringData ns) const;
+ Database* get(OperationContext* opCtx, StringData ns) const;
/**
* Retrieves a database reference if it is already opened, or opens it if it hasn't been
@@ -62,12 +62,12 @@ public:
* @param justCreated Returns whether the database was newly created (true) or it already
* existed (false). Can be NULL if this information is not necessary.
*/
- Database* openDb(OperationContext* txn, StringData ns, bool* justCreated = NULL);
+ Database* openDb(OperationContext* opCtx, StringData ns, bool* justCreated = NULL);
/**
* Closes the specified database. Must be called with the database locked in X-mode.
*/
- void close(OperationContext* txn, StringData ns);
+ void close(OperationContext* opCtx, StringData ns);
/**
* Closes all opened databases. Must be called with the global lock acquired in X-mode.
@@ -75,7 +75,7 @@ public:
* @param result Populated with the names of the databases, which were closed.
* @param force Force close even if something underway - use at shutdown
*/
- bool closeAll(OperationContext* txn, BSONObjBuilder& result, bool force);
+ bool closeAll(OperationContext* opCtx, BSONObjBuilder& result, bool force);
/**
* Returns the set of existing database names that differ only in casing.
diff --git a/src/mongo/db/catalog/document_validation.h b/src/mongo/db/catalog/document_validation.h
index e5b0fc0555c..e92d5bd4edd 100644
--- a/src/mongo/db/catalog/document_validation.h
+++ b/src/mongo/db/catalog/document_validation.h
@@ -56,17 +56,17 @@ class DisableDocumentValidation {
MONGO_DISALLOW_COPYING(DisableDocumentValidation);
public:
- DisableDocumentValidation(OperationContext* txn)
- : _txn(txn), _initialState(documentValidationDisabled(_txn)) {
- documentValidationDisabled(_txn) = true;
+ DisableDocumentValidation(OperationContext* opCtx)
+ : _opCtx(opCtx), _initialState(documentValidationDisabled(_opCtx)) {
+ documentValidationDisabled(_opCtx) = true;
}
~DisableDocumentValidation() {
- documentValidationDisabled(_txn) = _initialState;
+ documentValidationDisabled(_opCtx) = _initialState;
}
private:
- OperationContext* const _txn;
+ OperationContext* const _opCtx;
const bool _initialState;
};
@@ -75,9 +75,9 @@ private:
*/
class DisableDocumentValidationIfTrue {
public:
- DisableDocumentValidationIfTrue(OperationContext* txn, bool shouldDisableValidation) {
+ DisableDocumentValidationIfTrue(OperationContext* opCtx, bool shouldDisableValidation) {
if (shouldDisableValidation)
- _documentValidationDisabler.emplace(txn);
+ _documentValidationDisabler.emplace(opCtx);
}
private:
diff --git a/src/mongo/db/catalog/drop_collection.cpp b/src/mongo/db/catalog/drop_collection.cpp
index ae5d0fed3d7..3699f1a76ad 100644
--- a/src/mongo/db/catalog/drop_collection.cpp
+++ b/src/mongo/db/catalog/drop_collection.cpp
@@ -49,7 +49,7 @@
namespace mongo {
-Status dropCollection(OperationContext* txn,
+Status dropCollection(OperationContext* opCtx,
const NamespaceString& collectionName,
BSONObjBuilder& result) {
if (!serverGlobalParams.quiet.load()) {
@@ -59,22 +59,23 @@ Status dropCollection(OperationContext* txn,
const std::string dbname = collectionName.db().toString();
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
+ ScopedTransaction transaction(opCtx, MODE_IX);
- AutoGetDb autoDb(txn, dbname, MODE_X);
+ AutoGetDb autoDb(opCtx, dbname, MODE_X);
Database* const db = autoDb.getDb();
Collection* coll = db ? db->getCollection(collectionName) : nullptr;
- auto view = db && !coll ? db->getViewCatalog()->lookup(txn, collectionName.ns()) : nullptr;
+ auto view =
+ db && !coll ? db->getViewCatalog()->lookup(opCtx, collectionName.ns()) : nullptr;
if (!db || (!coll && !view)) {
return Status(ErrorCodes::NamespaceNotFound, "ns not found");
}
const bool shardVersionCheck = true;
- OldClientContext context(txn, collectionName.ns(), shardVersionCheck);
+ OldClientContext context(opCtx, collectionName.ns(), shardVersionCheck);
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, collectionName);
+ bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, collectionName);
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::NotMaster,
@@ -82,16 +83,16 @@ Status dropCollection(OperationContext* txn,
<< collectionName.ns());
}
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
result.append("ns", collectionName.ns());
if (coll) {
invariant(!view);
- int numIndexes = coll->getIndexCatalog()->numIndexesTotal(txn);
+ int numIndexes = coll->getIndexCatalog()->numIndexesTotal(opCtx);
BackgroundOperation::assertNoBgOpInProgForNs(collectionName.ns());
- Status s = db->dropCollection(txn, collectionName.ns());
+ Status s = db->dropCollection(opCtx, collectionName.ns());
if (!s.isOK()) {
return s;
@@ -100,14 +101,14 @@ Status dropCollection(OperationContext* txn,
result.append("nIndexesWas", numIndexes);
} else {
invariant(view);
- Status status = db->dropView(txn, collectionName.ns());
+ Status status = db->dropView(opCtx, collectionName.ns());
if (!status.isOK()) {
return status;
}
}
wunit.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "drop", collectionName.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "drop", collectionName.ns());
return Status::OK();
}
diff --git a/src/mongo/db/catalog/drop_collection.h b/src/mongo/db/catalog/drop_collection.h
index c62b2c376c5..f0d51b6b051 100644
--- a/src/mongo/db/catalog/drop_collection.h
+++ b/src/mongo/db/catalog/drop_collection.h
@@ -37,7 +37,7 @@ class OperationContext;
* Drops the collection "collectionName" and populates "result" with statistics about what
* was removed.
*/
-Status dropCollection(OperationContext* txn,
+Status dropCollection(OperationContext* opCtx,
const NamespaceString& collectionName,
BSONObjBuilder& result);
} // namespace mongo
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
index e1a4ce63e33..4c20d20ffa9 100644
--- a/src/mongo/db/catalog/drop_database.cpp
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -48,21 +48,21 @@
namespace mongo {
-Status dropDatabase(OperationContext* txn, const std::string& dbName) {
+Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
uassert(ErrorCodes::IllegalOperation,
"Cannot drop a database in read-only mode",
!storageGlobalParams.readOnly);
// TODO (Kal): OldClientContext legacy, needs to be removed
{
- CurOp::get(txn)->ensureStarted();
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setNS_inlock(dbName);
+ CurOp::get(opCtx)->ensureStarted();
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setNS_inlock(dbName);
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- AutoGetDb autoDB(txn, dbName, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
+ AutoGetDb autoDB(opCtx, dbName, MODE_X);
Database* const db = autoDB.getDb();
if (!db) {
return Status(ErrorCodes::NamespaceNotFound,
@@ -70,8 +70,8 @@ Status dropDatabase(OperationContext* txn, const std::string& dbName) {
<< " because it does not exist");
}
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(txn, dbName);
+ bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(opCtx, dbName);
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::NotMaster,
@@ -79,16 +79,16 @@ Status dropDatabase(OperationContext* txn, const std::string& dbName) {
}
log() << "dropDatabase " << dbName << " starting";
- Database::dropDatabase(txn, db);
+ Database::dropDatabase(opCtx, db);
log() << "dropDatabase " << dbName << " finished";
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
- getGlobalServiceContext()->getOpObserver()->onDropDatabase(txn, dbName + ".$cmd");
+ getGlobalServiceContext()->getOpObserver()->onDropDatabase(opCtx, dbName + ".$cmd");
wunit.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropDatabase", dbName);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "dropDatabase", dbName);
return Status::OK();
}
diff --git a/src/mongo/db/catalog/drop_database.h b/src/mongo/db/catalog/drop_database.h
index b60e817be27..e0a0c8560e5 100644
--- a/src/mongo/db/catalog/drop_database.h
+++ b/src/mongo/db/catalog/drop_database.h
@@ -34,5 +34,5 @@ class OperationContext;
/**
* Drops the database "dbName".
*/
-Status dropDatabase(OperationContext* txn, const std::string& dbName);
+Status dropDatabase(OperationContext* opCtx, const std::string& dbName);
} // namespace mongo
diff --git a/src/mongo/db/catalog/drop_indexes.cpp b/src/mongo/db/catalog/drop_indexes.cpp
index 78739b6e4da..55c559912df 100644
--- a/src/mongo/db/catalog/drop_indexes.cpp
+++ b/src/mongo/db/catalog/drop_indexes.cpp
@@ -48,7 +48,7 @@
namespace mongo {
namespace {
-Status wrappedRun(OperationContext* txn,
+Status wrappedRun(OperationContext* opCtx,
const StringData& dbname,
const std::string& toDeleteNs,
Database* const db,
@@ -61,7 +61,7 @@ Status wrappedRun(OperationContext* txn,
// If db/collection does not exist, short circuit and return.
if (!db || !collection) {
- if (db && db->getViewCatalog()->lookup(txn, toDeleteNs)) {
+ if (db && db->getViewCatalog()->lookup(opCtx, toDeleteNs)) {
return {ErrorCodes::CommandNotSupportedOnView,
str::stream() << "Cannot drop indexes on view " << toDeleteNs};
}
@@ -69,11 +69,11 @@ Status wrappedRun(OperationContext* txn,
return Status(ErrorCodes::NamespaceNotFound, "ns not found");
}
- OldClientContext ctx(txn, toDeleteNs);
+ OldClientContext ctx(opCtx, toDeleteNs);
BackgroundOperation::assertNoBgOpInProgForNs(toDeleteNs);
IndexCatalog* indexCatalog = collection->getIndexCatalog();
- anObjBuilder->appendNumber("nIndexesWas", indexCatalog->numIndexesTotal(txn));
+ anObjBuilder->appendNumber("nIndexesWas", indexCatalog->numIndexesTotal(opCtx));
BSONElement f = jsobj.getField("index");
@@ -81,7 +81,7 @@ Status wrappedRun(OperationContext* txn,
std::string indexToDelete = f.valuestr();
if (indexToDelete == "*") {
- Status s = indexCatalog->dropAllIndexes(txn, false);
+ Status s = indexCatalog->dropAllIndexes(opCtx, false);
if (!s.isOK()) {
return s;
}
@@ -89,7 +89,8 @@ Status wrappedRun(OperationContext* txn,
return Status::OK();
}
- IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByName(txn, indexToDelete);
+ IndexDescriptor* desc =
+ collection->getIndexCatalog()->findIndexByName(opCtx, indexToDelete);
if (desc == NULL) {
return Status(ErrorCodes::IndexNotFound,
str::stream() << "index not found with name [" << indexToDelete << "]");
@@ -99,7 +100,7 @@ Status wrappedRun(OperationContext* txn,
return Status(ErrorCodes::InvalidOptions, "cannot drop _id index");
}
- Status s = indexCatalog->dropIndex(txn, desc);
+ Status s = indexCatalog->dropIndex(opCtx, desc);
if (!s.isOK()) {
return s;
}
@@ -110,7 +111,7 @@ Status wrappedRun(OperationContext* txn,
if (f.type() == Object) {
std::vector<IndexDescriptor*> indexes;
collection->getIndexCatalog()->findIndexesByKeyPattern(
- txn, f.embeddedObject(), false, &indexes);
+ opCtx, f.embeddedObject(), false, &indexes);
if (indexes.empty()) {
return Status(ErrorCodes::IndexNotFound,
str::stream() << "can't find index with key: " << f.embeddedObject());
@@ -130,7 +131,7 @@ Status wrappedRun(OperationContext* txn,
return Status(ErrorCodes::InvalidOptions, "cannot drop _id index");
}
- Status s = indexCatalog->dropIndex(txn, desc);
+ Status s = indexCatalog->dropIndex(opCtx, desc);
if (!s.isOK()) {
return s;
}
@@ -142,35 +143,35 @@ Status wrappedRun(OperationContext* txn,
}
} // namespace
-Status dropIndexes(OperationContext* txn,
+Status dropIndexes(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& idxDescriptor,
BSONObjBuilder* result) {
StringData dbName = nss.db();
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, dbName, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ AutoGetDb autoDb(opCtx, dbName, MODE_X);
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, nss);
+ bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, nss);
if (userInitiatedWritesAndNotPrimary) {
return {ErrorCodes::NotMaster,
str::stream() << "Not primary while dropping indexes in " << nss.ns()};
}
- WriteUnitOfWork wunit(txn);
- Status status = wrappedRun(txn, dbName, nss.ns(), autoDb.getDb(), idxDescriptor, result);
+ WriteUnitOfWork wunit(opCtx);
+ Status status = wrappedRun(opCtx, dbName, nss.ns(), autoDb.getDb(), idxDescriptor, result);
if (!status.isOK()) {
return status;
}
getGlobalServiceContext()->getOpObserver()->onDropIndex(
- txn, dbName.toString() + ".$cmd", idxDescriptor);
+ opCtx, dbName.toString() + ".$cmd", idxDescriptor);
wunit.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropIndexes", dbName);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "dropIndexes", dbName);
return Status::OK();
}
diff --git a/src/mongo/db/catalog/drop_indexes.h b/src/mongo/db/catalog/drop_indexes.h
index 931fa348019..67f4b5a64c2 100644
--- a/src/mongo/db/catalog/drop_indexes.h
+++ b/src/mongo/db/catalog/drop_indexes.h
@@ -38,7 +38,7 @@ class OperationContext;
* Drops the index from collection "ns" that matches the "idxDescriptor" and populates
* "result" with some statistics about the dropped index.
*/
-Status dropIndexes(OperationContext* txn,
+Status dropIndexes(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& idxDescriptor,
BSONObjBuilder* result);
diff --git a/src/mongo/db/catalog/head_manager.h b/src/mongo/db/catalog/head_manager.h
index 7a671ccf69f..12042e33f84 100644
--- a/src/mongo/db/catalog/head_manager.h
+++ b/src/mongo/db/catalog/head_manager.h
@@ -42,9 +42,9 @@ class HeadManager {
public:
virtual ~HeadManager() {}
- virtual const RecordId getHead(OperationContext* txn) const = 0;
+ virtual const RecordId getHead(OperationContext* opCtx) const = 0;
- virtual void setHead(OperationContext* txn, const RecordId newHead) = 0;
+ virtual void setHead(OperationContext* opCtx, const RecordId newHead) = 0;
};
} // namespace mongo
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index a34baf61c28..32fb80b5587 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -99,26 +99,26 @@ IndexCatalog::~IndexCatalog() {
_magic = 123456;
}
-Status IndexCatalog::init(OperationContext* txn) {
+Status IndexCatalog::init(OperationContext* opCtx) {
vector<string> indexNames;
- _collection->getCatalogEntry()->getAllIndexes(txn, &indexNames);
+ _collection->getCatalogEntry()->getAllIndexes(opCtx, &indexNames);
for (size_t i = 0; i < indexNames.size(); i++) {
const string& indexName = indexNames[i];
- BSONObj spec = _collection->getCatalogEntry()->getIndexSpec(txn, indexName).getOwned();
+ BSONObj spec = _collection->getCatalogEntry()->getIndexSpec(opCtx, indexName).getOwned();
- if (!_collection->getCatalogEntry()->isIndexReady(txn, indexName)) {
+ if (!_collection->getCatalogEntry()->isIndexReady(opCtx, indexName)) {
_unfinishedIndexes.push_back(spec);
continue;
}
BSONObj keyPattern = spec.getObjectField("key");
IndexDescriptor* descriptor =
- new IndexDescriptor(_collection, _getAccessMethodName(txn, keyPattern), spec);
+ new IndexDescriptor(_collection, _getAccessMethodName(opCtx, keyPattern), spec);
const bool initFromDisk = true;
- IndexCatalogEntry* entry = _setupInMemoryStructures(txn, descriptor, initFromDisk);
+ IndexCatalogEntry* entry = _setupInMemoryStructures(opCtx, descriptor, initFromDisk);
- fassert(17340, entry->isReady(txn));
+ fassert(17340, entry->isReady(opCtx));
}
if (_unfinishedIndexes.size()) {
@@ -132,36 +132,36 @@ Status IndexCatalog::init(OperationContext* txn) {
return Status::OK();
}
-IndexCatalogEntry* IndexCatalog::_setupInMemoryStructures(OperationContext* txn,
+IndexCatalogEntry* IndexCatalog::_setupInMemoryStructures(OperationContext* opCtx,
IndexDescriptor* descriptor,
bool initFromDisk) {
unique_ptr<IndexDescriptor> descriptorCleanup(descriptor);
- Status status = _isSpecOk(txn, descriptor->infoObj());
+ Status status = _isSpecOk(opCtx, descriptor->infoObj());
if (!status.isOK() && status != ErrorCodes::IndexAlreadyExists) {
severe() << "Found an invalid index " << descriptor->infoObj() << " on the "
<< _collection->ns().ns() << " collection: " << redact(status);
fassertFailedNoTrace(28782);
}
- auto entry = stdx::make_unique<IndexCatalogEntry>(txn,
+ auto entry = stdx::make_unique<IndexCatalogEntry>(opCtx,
_collection->ns().ns(),
_collection->getCatalogEntry(),
descriptorCleanup.release(),
_collection->infoCache());
std::unique_ptr<IndexAccessMethod> accessMethod(
- _collection->_dbce->getIndex(txn, _collection->getCatalogEntry(), entry.get()));
+ _collection->_dbce->getIndex(opCtx, _collection->getCatalogEntry(), entry.get()));
entry->init(std::move(accessMethod));
IndexCatalogEntry* save = entry.get();
_entries.add(entry.release());
if (!initFromDisk) {
- txn->recoveryUnit()->onRollback([this, txn, descriptor] {
+ opCtx->recoveryUnit()->onRollback([this, opCtx, descriptor] {
// Need to preserve indexName as descriptor no longer exists after remove().
const std::string indexName = descriptor->indexName();
_entries.remove(descriptor);
- _collection->infoCache()->droppedIndex(txn, indexName);
+ _collection->infoCache()->droppedIndex(opCtx, indexName);
});
}
@@ -193,11 +193,11 @@ Status IndexCatalog::checkUnfinished() const {
<< _collection->ns().ns());
}
-bool IndexCatalog::_shouldOverridePlugin(OperationContext* txn, const BSONObj& keyPattern) const {
+bool IndexCatalog::_shouldOverridePlugin(OperationContext* opCtx, const BSONObj& keyPattern) const {
string pluginName = IndexNames::findPluginName(keyPattern);
bool known = IndexNames::isKnownName(pluginName);
- if (!_collection->_dbce->isOlderThan24(txn)) {
+ if (!_collection->_dbce->isOlderThan24(opCtx)) {
// RulesFor24+
// This assert will be triggered when downgrading from a future version that
// supports an index plugin unsupported by this version.
@@ -225,8 +225,9 @@ bool IndexCatalog::_shouldOverridePlugin(OperationContext* txn, const BSONObj& k
return false;
}
-string IndexCatalog::_getAccessMethodName(OperationContext* txn, const BSONObj& keyPattern) const {
- if (_shouldOverridePlugin(txn, keyPattern)) {
+string IndexCatalog::_getAccessMethodName(OperationContext* opCtx,
+ const BSONObj& keyPattern) const {
+ if (_shouldOverridePlugin(opCtx, keyPattern)) {
return "";
}
@@ -236,7 +237,7 @@ string IndexCatalog::_getAccessMethodName(OperationContext* txn, const BSONObj&
// ---------------------------
-Status IndexCatalog::_upgradeDatabaseMinorVersionIfNeeded(OperationContext* txn,
+Status IndexCatalog::_upgradeDatabaseMinorVersionIfNeeded(OperationContext* opCtx,
const string& newPluginName) {
// first check if requested index requires pdfile minor version to be bumped
if (IndexNames::existedBefore24(newPluginName)) {
@@ -245,7 +246,7 @@ Status IndexCatalog::_upgradeDatabaseMinorVersionIfNeeded(OperationContext* txn,
DatabaseCatalogEntry* dbce = _collection->_dbce;
- if (!dbce->isOlderThan24(txn)) {
+ if (!dbce->isOlderThan24(opCtx)) {
return Status::OK(); // these checks have already been done
}
@@ -255,7 +256,7 @@ Status IndexCatalog::_upgradeDatabaseMinorVersionIfNeeded(OperationContext* txn,
// which allows creation of indexes using new plugins.
RecordStore* indexes = dbce->getRecordStore(dbce->name() + ".system.indexes");
- auto cursor = indexes->getCursor(txn);
+ auto cursor = indexes->getCursor(opCtx);
while (auto record = cursor->next()) {
const BSONObj index = record->data.releaseToBson();
const BSONObj key = index.getObjectField("key");
@@ -271,45 +272,45 @@ Status IndexCatalog::_upgradeDatabaseMinorVersionIfNeeded(OperationContext* txn,
return Status(ErrorCodes::CannotCreateIndex, errmsg);
}
- dbce->markIndexSafe24AndUp(txn);
+ dbce->markIndexSafe24AndUp(opCtx);
return Status::OK();
}
-StatusWith<BSONObj> IndexCatalog::prepareSpecForCreate(OperationContext* txn,
+StatusWith<BSONObj> IndexCatalog::prepareSpecForCreate(OperationContext* opCtx,
const BSONObj& original) const {
- Status status = _isSpecOk(txn, original);
+ Status status = _isSpecOk(opCtx, original);
if (!status.isOK())
return StatusWith<BSONObj>(status);
- auto fixed = _fixIndexSpec(txn, _collection, original);
+ auto fixed = _fixIndexSpec(opCtx, _collection, original);
if (!fixed.isOK()) {
return fixed;
}
// we double check with new index spec
- status = _isSpecOk(txn, fixed.getValue());
+ status = _isSpecOk(opCtx, fixed.getValue());
if (!status.isOK())
return StatusWith<BSONObj>(status);
- status = _doesSpecConflictWithExisting(txn, fixed.getValue());
+ status = _doesSpecConflictWithExisting(opCtx, fixed.getValue());
if (!status.isOK())
return StatusWith<BSONObj>(status);
return fixed;
}
-StatusWith<BSONObj> IndexCatalog::createIndexOnEmptyCollection(OperationContext* txn,
+StatusWith<BSONObj> IndexCatalog::createIndexOnEmptyCollection(OperationContext* opCtx,
BSONObj spec) {
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().toString(), MODE_X));
- invariant(_collection->numRecords(txn) == 0);
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().toString(), MODE_X));
+ invariant(_collection->numRecords(opCtx) == 0);
_checkMagic();
Status status = checkUnfinished();
if (!status.isOK())
return status;
- StatusWith<BSONObj> statusWithSpec = prepareSpecForCreate(txn, spec);
+ StatusWith<BSONObj> statusWithSpec = prepareSpecForCreate(opCtx, spec);
status = statusWithSpec.getStatus();
if (!status.isOK())
return status;
@@ -317,13 +318,13 @@ StatusWith<BSONObj> IndexCatalog::createIndexOnEmptyCollection(OperationContext*
string pluginName = IndexNames::findPluginName(spec["key"].Obj());
if (pluginName.size()) {
- Status s = _upgradeDatabaseMinorVersionIfNeeded(txn, pluginName);
+ Status s = _upgradeDatabaseMinorVersionIfNeeded(opCtx, pluginName);
if (!s.isOK())
return s;
}
// now going to touch disk
- IndexBuildBlock indexBuildBlock(txn, _collection, spec);
+ IndexBuildBlock indexBuildBlock(opCtx, _collection, spec);
status = indexBuildBlock.init();
if (!status.isOK())
return status;
@@ -335,18 +336,18 @@ StatusWith<BSONObj> IndexCatalog::createIndexOnEmptyCollection(OperationContext*
invariant(descriptor);
invariant(entry == _entries.find(descriptor));
- status = entry->accessMethod()->initializeAsEmpty(txn);
+ status = entry->accessMethod()->initializeAsEmpty(opCtx);
if (!status.isOK())
return status;
indexBuildBlock.success();
// sanity check
- invariant(_collection->getCatalogEntry()->isIndexReady(txn, descriptor->indexName()));
+ invariant(_collection->getCatalogEntry()->isIndexReady(opCtx, descriptor->indexName()));
return spec;
}
-IndexCatalog::IndexBuildBlock::IndexBuildBlock(OperationContext* txn,
+IndexCatalog::IndexBuildBlock::IndexBuildBlock(OperationContext* opCtx,
Collection* collection,
const BSONObj& spec)
: _collection(collection),
@@ -354,7 +355,7 @@ IndexCatalog::IndexBuildBlock::IndexBuildBlock(OperationContext* txn,
_ns(_catalog->_collection->ns().ns()),
_spec(spec.getOwned()),
_entry(NULL),
- _txn(txn) {
+ _opCtx(opCtx) {
invariant(collection);
}
@@ -370,18 +371,18 @@ Status IndexCatalog::IndexBuildBlock::init() {
/// ---------- setup on disk structures ----------------
- Status status = _collection->getCatalogEntry()->prepareForIndexBuild(_txn, descriptor);
+ Status status = _collection->getCatalogEntry()->prepareForIndexBuild(_opCtx, descriptor);
if (!status.isOK())
return status;
/// ---------- setup in memory structures ----------------
const bool initFromDisk = false;
- _entry = _catalog->_setupInMemoryStructures(_txn, descriptorCleaner.release(), initFromDisk);
+ _entry = _catalog->_setupInMemoryStructures(_opCtx, descriptorCleaner.release(), initFromDisk);
// Register this index with the CollectionInfoCache to regenerate the cache. This way, updates
// occurring while an index is being build in the background will be aware of whether or not
// they need to modify any indexes.
- _collection->infoCache()->addedIndex(_txn, descriptor);
+ _collection->infoCache()->addedIndex(_opCtx, descriptor);
return Status::OK();
}
@@ -397,9 +398,9 @@ void IndexCatalog::IndexBuildBlock::fail() {
invariant(entry == _entry);
if (entry) {
- _catalog->_dropIndex(_txn, entry);
+ _catalog->_dropIndex(_opCtx, entry);
} else {
- _catalog->_deleteIndexFromDisk(_txn, _indexName, _indexNamespace);
+ _catalog->_deleteIndexFromDisk(_opCtx, _indexName, _indexNamespace);
}
}
@@ -407,24 +408,24 @@ void IndexCatalog::IndexBuildBlock::success() {
Collection* collection = _catalog->_collection;
fassert(17207, collection->ok());
NamespaceString ns(_indexNamespace);
- invariant(_txn->lockState()->isDbLockedForMode(ns.db(), MODE_X));
+ invariant(_opCtx->lockState()->isDbLockedForMode(ns.db(), MODE_X));
- collection->getCatalogEntry()->indexBuildSuccess(_txn, _indexName);
+ collection->getCatalogEntry()->indexBuildSuccess(_opCtx, _indexName);
- IndexDescriptor* desc = _catalog->findIndexByName(_txn, _indexName, true);
+ IndexDescriptor* desc = _catalog->findIndexByName(_opCtx, _indexName, true);
fassert(17330, desc);
IndexCatalogEntry* entry = _catalog->_entries.find(desc);
fassert(17331, entry && entry == _entry);
- OperationContext* txn = _txn;
+ OperationContext* opCtx = _opCtx;
LOG(2) << "marking index " << _indexName << " as ready in snapshot id "
- << txn->recoveryUnit()->getSnapshotId();
- _txn->recoveryUnit()->onCommit([txn, entry, collection] {
+ << opCtx->recoveryUnit()->getSnapshotId();
+ _opCtx->recoveryUnit()->onCommit([opCtx, entry, collection] {
// Note: this runs after the WUOW commits but before we release our X lock on the
// collection. This means that any snapshot created after this must include the full index,
// and no one can try to read this index before we set the visibility.
- auto replCoord = repl::ReplicationCoordinator::get(txn);
- auto snapshotName = replCoord->reserveSnapshotName(txn);
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
+ auto snapshotName = replCoord->reserveSnapshotName(opCtx);
replCoord->forceSnapshotCreation(); // Ensures a newer snapshot gets created even if idle.
entry->setMinimumVisibleSnapshot(snapshotName);
@@ -470,7 +471,7 @@ Status _checkValidFilterExpressions(MatchExpression* expression, int level = 0)
}
}
-Status IndexCatalog::_isSpecOk(OperationContext* txn, const BSONObj& spec) const {
+Status IndexCatalog::_isSpecOk(OperationContext* opCtx, const BSONObj& spec) const {
const NamespaceString& nss = _collection->ns();
BSONElement vElt = spec["v"];
@@ -505,7 +506,7 @@ Status IndexCatalog::_isSpecOk(OperationContext* txn, const BSONObj& spec) const
// SERVER-16893 Forbid use of v0 indexes with non-mmapv1 engines
if (indexVersion == IndexVersion::kV0 &&
- !txn->getServiceContext()->getGlobalStorageEngine()->isMmapV1()) {
+ !opCtx->getServiceContext()->getGlobalStorageEngine()->isMmapV1()) {
return Status(ErrorCodes::CannotCreateIndex,
str::stream() << "use of v0 indexes is only allowed with the "
<< "mmapv1 storage engine");
@@ -577,7 +578,7 @@ Status IndexCatalog::_isSpecOk(OperationContext* txn, const BSONObj& spec) const
return Status(ErrorCodes::CannotCreateIndex,
"\"collation\" for an index must be a document");
}
- auto statusWithCollator = CollatorFactoryInterface::get(txn->getServiceContext())
+ auto statusWithCollator = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(collationElement.Obj());
if (!statusWithCollator.isOK()) {
return statusWithCollator.getStatus();
@@ -696,7 +697,7 @@ Status IndexCatalog::_isSpecOk(OperationContext* txn, const BSONObj& spec) const
return Status::OK();
}
-Status IndexCatalog::_doesSpecConflictWithExisting(OperationContext* txn,
+Status IndexCatalog::_doesSpecConflictWithExisting(OperationContext* opCtx,
const BSONObj& spec) const {
const char* name = spec.getStringField("name");
invariant(name[0]);
@@ -706,7 +707,7 @@ Status IndexCatalog::_doesSpecConflictWithExisting(OperationContext* txn,
{
// Check both existing and in-progress indexes (2nd param = true)
- const IndexDescriptor* desc = findIndexByName(txn, name, true);
+ const IndexDescriptor* desc = findIndexByName(opCtx, name, true);
if (desc) {
// index already exists with same name
@@ -736,7 +737,7 @@ Status IndexCatalog::_doesSpecConflictWithExisting(OperationContext* txn,
<< spec);
}
- IndexDescriptor temp(_collection, _getAccessMethodName(txn, key), spec);
+ IndexDescriptor temp(_collection, _getAccessMethodName(opCtx, key), spec);
if (!desc->areIndexOptionsEquivalent(&temp))
return Status(ErrorCodes::IndexOptionsConflict,
str::stream() << "Index with name: " << name
@@ -753,12 +754,12 @@ Status IndexCatalog::_doesSpecConflictWithExisting(OperationContext* txn,
// Check both existing and in-progress indexes.
const bool findInProgressIndexes = true;
const IndexDescriptor* desc =
- findIndexByKeyPatternAndCollationSpec(txn, key, collation, findInProgressIndexes);
+ findIndexByKeyPatternAndCollationSpec(opCtx, key, collation, findInProgressIndexes);
if (desc) {
LOG(2) << "index already exists with diff name " << name << " pattern: " << key
<< " collation: " << collation;
- IndexDescriptor temp(_collection, _getAccessMethodName(txn, key), spec);
+ IndexDescriptor temp(_collection, _getAccessMethodName(opCtx, key), spec);
if (!desc->areIndexOptionsEquivalent(&temp))
return Status(ErrorCodes::IndexOptionsConflict,
str::stream() << "Index: " << spec
@@ -770,7 +771,7 @@ Status IndexCatalog::_doesSpecConflictWithExisting(OperationContext* txn,
}
}
- if (numIndexesTotal(txn) >= _maxNumIndexesAllowed) {
+ if (numIndexesTotal(opCtx) >= _maxNumIndexesAllowed) {
string s = str::stream() << "add index fails, too many indexes for "
<< _collection->ns().ns() << " key:" << key;
log() << s;
@@ -783,7 +784,7 @@ Status IndexCatalog::_doesSpecConflictWithExisting(OperationContext* txn,
if (pluginName == IndexNames::TEXT) {
vector<IndexDescriptor*> textIndexes;
const bool includeUnfinishedIndexes = true;
- findIndexByType(txn, IndexNames::TEXT, textIndexes, includeUnfinishedIndexes);
+ findIndexByType(opCtx, IndexNames::TEXT, textIndexes, includeUnfinishedIndexes);
if (textIndexes.size() > 0) {
return Status(ErrorCodes::CannotCreateIndex,
str::stream() << "only one text index per collection allowed, "
@@ -813,8 +814,8 @@ BSONObj IndexCatalog::getDefaultIdIndexSpec(
return b.obj();
}
-Status IndexCatalog::dropAllIndexes(OperationContext* txn, bool includingIdIndex) {
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().toString(), MODE_X));
+Status IndexCatalog::dropAllIndexes(OperationContext* opCtx, bool includingIdIndex) {
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().toString(), MODE_X));
BackgroundOperation::assertNoBgOpInProgForNs(_collection->ns().ns());
@@ -825,14 +826,14 @@ Status IndexCatalog::dropAllIndexes(OperationContext* txn, bool includingIdIndex
// make sure nothing in progress
massert(17348,
"cannot dropAllIndexes when index builds in progress",
- numIndexesTotal(txn) == numIndexesReady(txn));
+ numIndexesTotal(opCtx) == numIndexesReady(opCtx));
bool haveIdIndex = false;
vector<string> indexNamesToDrop;
{
int seen = 0;
- IndexIterator ii = getIndexIterator(txn, true);
+ IndexIterator ii = getIndexIterator(opCtx, true);
while (ii.more()) {
seen++;
IndexDescriptor* desc = ii.next();
@@ -842,39 +843,39 @@ Status IndexCatalog::dropAllIndexes(OperationContext* txn, bool includingIdIndex
}
indexNamesToDrop.push_back(desc->indexName());
}
- invariant(seen == numIndexesTotal(txn));
+ invariant(seen == numIndexesTotal(opCtx));
}
for (size_t i = 0; i < indexNamesToDrop.size(); i++) {
string indexName = indexNamesToDrop[i];
- IndexDescriptor* desc = findIndexByName(txn, indexName, true);
+ IndexDescriptor* desc = findIndexByName(opCtx, indexName, true);
invariant(desc);
LOG(1) << "\t dropAllIndexes dropping: " << desc->toString();
IndexCatalogEntry* entry = _entries.find(desc);
invariant(entry);
- _dropIndex(txn, entry);
+ _dropIndex(opCtx, entry);
}
// verify state is sane post cleaning
long long numIndexesInCollectionCatalogEntry =
- _collection->getCatalogEntry()->getTotalIndexCount(txn);
+ _collection->getCatalogEntry()->getTotalIndexCount(opCtx);
if (haveIdIndex) {
- fassert(17324, numIndexesTotal(txn) == 1);
- fassert(17325, numIndexesReady(txn) == 1);
+ fassert(17324, numIndexesTotal(opCtx) == 1);
+ fassert(17325, numIndexesReady(opCtx) == 1);
fassert(17326, numIndexesInCollectionCatalogEntry == 1);
fassert(17336, _entries.size() == 1);
} else {
- if (numIndexesTotal(txn) || numIndexesInCollectionCatalogEntry || _entries.size()) {
+ if (numIndexesTotal(opCtx) || numIndexesInCollectionCatalogEntry || _entries.size()) {
error() << "About to fassert - "
- << " numIndexesTotal(): " << numIndexesTotal(txn)
+ << " numIndexesTotal(): " << numIndexesTotal(opCtx)
<< " numSystemIndexesEntries: " << numIndexesInCollectionCatalogEntry
<< " _entries.size(): " << _entries.size()
<< " indexNamesToDrop: " << indexNamesToDrop.size()
<< " haveIdIndex: " << haveIdIndex;
}
- fassert(17327, numIndexesTotal(txn) == 0);
+ fassert(17327, numIndexesTotal(opCtx) == 0);
fassert(17328, numIndexesInCollectionCatalogEntry == 0);
fassert(17337, _entries.size() == 0);
}
@@ -882,34 +883,34 @@ Status IndexCatalog::dropAllIndexes(OperationContext* txn, bool includingIdIndex
return Status::OK();
}
-Status IndexCatalog::dropIndex(OperationContext* txn, IndexDescriptor* desc) {
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().toString(), MODE_X));
+Status IndexCatalog::dropIndex(OperationContext* opCtx, IndexDescriptor* desc) {
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().toString(), MODE_X));
IndexCatalogEntry* entry = _entries.find(desc);
if (!entry)
return Status(ErrorCodes::InternalError, "cannot find index to delete");
- if (!entry->isReady(txn))
+ if (!entry->isReady(opCtx))
return Status(ErrorCodes::InternalError, "cannot delete not ready index");
BackgroundOperation::assertNoBgOpInProgForNs(_collection->ns().ns());
- return _dropIndex(txn, entry);
+ return _dropIndex(opCtx, entry);
}
namespace {
class IndexRemoveChange final : public RecoveryUnit::Change {
public:
- IndexRemoveChange(OperationContext* txn,
+ IndexRemoveChange(OperationContext* opCtx,
Collection* collection,
IndexCatalogEntryContainer* entries,
IndexCatalogEntry* entry)
- : _txn(txn), _collection(collection), _entries(entries), _entry(entry) {}
+ : _opCtx(opCtx), _collection(collection), _entries(entries), _entry(entry) {}
void commit() final {
// Ban reading from this collection on committed reads on snapshots before now.
- auto replCoord = repl::ReplicationCoordinator::get(_txn);
- auto snapshotName = replCoord->reserveSnapshotName(_txn);
+ auto replCoord = repl::ReplicationCoordinator::get(_opCtx);
+ auto snapshotName = replCoord->reserveSnapshotName(_opCtx);
replCoord->forceSnapshotCreation(); // Ensures a newer snapshot gets created even if idle.
_collection->setMinimumVisibleSnapshot(snapshotName);
@@ -918,18 +919,18 @@ public:
void rollback() final {
_entries->add(_entry);
- _collection->infoCache()->addedIndex(_txn, _entry->descriptor());
+ _collection->infoCache()->addedIndex(_opCtx, _entry->descriptor());
}
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
Collection* _collection;
IndexCatalogEntryContainer* _entries;
IndexCatalogEntry* _entry;
};
} // namespace
-Status IndexCatalog::_dropIndex(OperationContext* txn, IndexCatalogEntry* entry) {
+Status IndexCatalog::_dropIndex(OperationContext* opCtx, IndexCatalogEntry* entry) {
/**
* IndexState in order
* <db>.system.indexes
@@ -955,7 +956,7 @@ Status IndexCatalog::_dropIndex(OperationContext* txn, IndexCatalogEntry* entry)
// being built.
// TODO only kill cursors that are actually using the index rather than everything on this
// collection.
- if (entry->isReady(txn)) {
+ if (entry->isReady(opCtx)) {
_collection->getCursorManager()->invalidateAll(
false, str::stream() << "index '" << indexName << "' dropped");
}
@@ -964,21 +965,22 @@ Status IndexCatalog::_dropIndex(OperationContext* txn, IndexCatalogEntry* entry)
audit::logDropIndex(&cc(), indexName, _collection->ns().ns());
invariant(_entries.release(entry->descriptor()) == entry);
- txn->recoveryUnit()->registerChange(new IndexRemoveChange(txn, _collection, &_entries, entry));
+ opCtx->recoveryUnit()->registerChange(
+ new IndexRemoveChange(opCtx, _collection, &_entries, entry));
entry = NULL;
- _deleteIndexFromDisk(txn, indexName, indexNamespace);
+ _deleteIndexFromDisk(opCtx, indexName, indexNamespace);
_checkMagic();
- _collection->infoCache()->droppedIndex(txn, indexName);
+ _collection->infoCache()->droppedIndex(opCtx, indexName);
return Status::OK();
}
-void IndexCatalog::_deleteIndexFromDisk(OperationContext* txn,
+void IndexCatalog::_deleteIndexFromDisk(OperationContext* opCtx,
const string& indexName,
const string& indexNamespace) {
- Status status = _collection->getCatalogEntry()->removeIndex(txn, indexName);
+ Status status = _collection->getCatalogEntry()->removeIndex(opCtx, indexName);
if (status.code() == ErrorCodes::NamespaceNotFound) {
// this is ok, as we may be partially through index creation
} else if (!status.isOK()) {
@@ -987,30 +989,30 @@ void IndexCatalog::_deleteIndexFromDisk(OperationContext* txn,
}
}
-vector<BSONObj> IndexCatalog::getAndClearUnfinishedIndexes(OperationContext* txn) {
+vector<BSONObj> IndexCatalog::getAndClearUnfinishedIndexes(OperationContext* opCtx) {
vector<BSONObj> toReturn = _unfinishedIndexes;
_unfinishedIndexes.clear();
for (size_t i = 0; i < toReturn.size(); i++) {
BSONObj spec = toReturn[i];
BSONObj keyPattern = spec.getObjectField("key");
- IndexDescriptor desc(_collection, _getAccessMethodName(txn, keyPattern), spec);
+ IndexDescriptor desc(_collection, _getAccessMethodName(opCtx, keyPattern), spec);
- _deleteIndexFromDisk(txn, desc.indexName(), desc.indexNamespace());
+ _deleteIndexFromDisk(opCtx, desc.indexName(), desc.indexNamespace());
}
return toReturn;
}
-bool IndexCatalog::isMultikey(OperationContext* txn, const IndexDescriptor* idx) {
+bool IndexCatalog::isMultikey(OperationContext* opCtx, const IndexDescriptor* idx) {
IndexCatalogEntry* entry = _entries.find(idx);
invariant(entry);
return entry->isMultikey();
}
-MultikeyPaths IndexCatalog::getMultikeyPaths(OperationContext* txn, const IndexDescriptor* idx) {
+MultikeyPaths IndexCatalog::getMultikeyPaths(OperationContext* opCtx, const IndexDescriptor* idx) {
IndexCatalogEntry* entry = _entries.find(idx);
invariant(entry);
- return entry->getMultikeyPaths(txn);
+ return entry->getMultikeyPaths(opCtx);
}
// ---------------------------
@@ -1019,32 +1021,32 @@ bool IndexCatalog::haveAnyIndexes() const {
return _entries.size() != 0;
}
-int IndexCatalog::numIndexesTotal(OperationContext* txn) const {
+int IndexCatalog::numIndexesTotal(OperationContext* opCtx) const {
int count = _entries.size() + _unfinishedIndexes.size();
- dassert(_collection->getCatalogEntry()->getTotalIndexCount(txn) == count);
+ dassert(_collection->getCatalogEntry()->getTotalIndexCount(opCtx) == count);
return count;
}
-int IndexCatalog::numIndexesReady(OperationContext* txn) const {
+int IndexCatalog::numIndexesReady(OperationContext* opCtx) const {
int count = 0;
- IndexIterator ii = getIndexIterator(txn, /*includeUnfinished*/ false);
+ IndexIterator ii = getIndexIterator(opCtx, /*includeUnfinished*/ false);
while (ii.more()) {
ii.next();
count++;
}
- dassert(_collection->getCatalogEntry()->getCompletedIndexCount(txn) == count);
+ dassert(_collection->getCatalogEntry()->getCompletedIndexCount(opCtx) == count);
return count;
}
-bool IndexCatalog::haveIdIndex(OperationContext* txn) const {
- return findIdIndex(txn) != NULL;
+bool IndexCatalog::haveIdIndex(OperationContext* opCtx) const {
+ return findIdIndex(opCtx) != NULL;
}
-IndexCatalog::IndexIterator::IndexIterator(OperationContext* txn,
+IndexCatalog::IndexIterator::IndexIterator(OperationContext* opCtx,
const IndexCatalog* cat,
bool includeUnfinishedIndexes)
: _includeUnfinishedIndexes(includeUnfinishedIndexes),
- _txn(txn),
+ _opCtx(opCtx),
_catalog(cat),
_iterator(cat->_entries.begin()),
_start(true),
@@ -1086,7 +1088,7 @@ void IndexCatalog::IndexIterator::_advance() {
if (!_includeUnfinishedIndexes) {
if (auto minSnapshot = entry->getMinimumVisibleSnapshot()) {
- if (auto mySnapshot = _txn->recoveryUnit()->getMajorityCommittedSnapshot()) {
+ if (auto mySnapshot = _opCtx->recoveryUnit()->getMajorityCommittedSnapshot()) {
if (mySnapshot < minSnapshot) {
// This index isn't finished in my snapshot.
continue;
@@ -1094,7 +1096,7 @@ void IndexCatalog::IndexIterator::_advance() {
}
}
- if (!entry->isReady(_txn))
+ if (!entry->isReady(_opCtx))
continue;
}
@@ -1104,8 +1106,8 @@ void IndexCatalog::IndexIterator::_advance() {
}
-IndexDescriptor* IndexCatalog::findIdIndex(OperationContext* txn) const {
- IndexIterator ii = getIndexIterator(txn, false);
+IndexDescriptor* IndexCatalog::findIdIndex(OperationContext* opCtx) const {
+ IndexIterator ii = getIndexIterator(opCtx, false);
while (ii.more()) {
IndexDescriptor* desc = ii.next();
if (desc->isIdIndex())
@@ -1114,10 +1116,10 @@ IndexDescriptor* IndexCatalog::findIdIndex(OperationContext* txn) const {
return NULL;
}
-IndexDescriptor* IndexCatalog::findIndexByName(OperationContext* txn,
+IndexDescriptor* IndexCatalog::findIndexByName(OperationContext* opCtx,
StringData name,
bool includeUnfinishedIndexes) const {
- IndexIterator ii = getIndexIterator(txn, includeUnfinishedIndexes);
+ IndexIterator ii = getIndexIterator(opCtx, includeUnfinishedIndexes);
while (ii.more()) {
IndexDescriptor* desc = ii.next();
if (desc->indexName() == name)
@@ -1127,11 +1129,11 @@ IndexDescriptor* IndexCatalog::findIndexByName(OperationContext* txn,
}
IndexDescriptor* IndexCatalog::findIndexByKeyPatternAndCollationSpec(
- OperationContext* txn,
+ OperationContext* opCtx,
const BSONObj& key,
const BSONObj& collationSpec,
bool includeUnfinishedIndexes) const {
- IndexIterator ii = getIndexIterator(txn, includeUnfinishedIndexes);
+ IndexIterator ii = getIndexIterator(opCtx, includeUnfinishedIndexes);
while (ii.more()) {
IndexDescriptor* desc = ii.next();
if (SimpleBSONObjComparator::kInstance.evaluate(desc->keyPattern() == key) &&
@@ -1143,12 +1145,12 @@ IndexDescriptor* IndexCatalog::findIndexByKeyPatternAndCollationSpec(
return NULL;
}
-void IndexCatalog::findIndexesByKeyPattern(OperationContext* txn,
+void IndexCatalog::findIndexesByKeyPattern(OperationContext* opCtx,
const BSONObj& key,
bool includeUnfinishedIndexes,
std::vector<IndexDescriptor*>* matches) const {
invariant(matches);
- IndexIterator ii = getIndexIterator(txn, includeUnfinishedIndexes);
+ IndexIterator ii = getIndexIterator(opCtx, includeUnfinishedIndexes);
while (ii.more()) {
IndexDescriptor* desc = ii.next();
if (SimpleBSONObjComparator::kInstance.evaluate(desc->keyPattern() == key)) {
@@ -1157,12 +1159,12 @@ void IndexCatalog::findIndexesByKeyPattern(OperationContext* txn,
}
}
-IndexDescriptor* IndexCatalog::findShardKeyPrefixedIndex(OperationContext* txn,
+IndexDescriptor* IndexCatalog::findShardKeyPrefixedIndex(OperationContext* opCtx,
const BSONObj& shardKey,
bool requireSingleKey) const {
IndexDescriptor* best = NULL;
- IndexIterator ii = getIndexIterator(txn, false);
+ IndexIterator ii = getIndexIterator(opCtx, false);
while (ii.more()) {
IndexDescriptor* desc = ii.next();
bool hasSimpleCollation = desc->infoObj().getObjectField("collation").isEmpty();
@@ -1173,7 +1175,7 @@ IndexDescriptor* IndexCatalog::findShardKeyPrefixedIndex(OperationContext* txn,
if (!shardKey.isPrefixOf(desc->keyPattern(), SimpleBSONElementComparator::kInstance))
continue;
- if (!desc->isMultikey(txn) && hasSimpleCollation)
+ if (!desc->isMultikey(opCtx) && hasSimpleCollation)
return desc;
if (!requireSingleKey && hasSimpleCollation)
@@ -1183,11 +1185,11 @@ IndexDescriptor* IndexCatalog::findShardKeyPrefixedIndex(OperationContext* txn,
return best;
}
-void IndexCatalog::findIndexByType(OperationContext* txn,
+void IndexCatalog::findIndexByType(OperationContext* opCtx,
const string& type,
vector<IndexDescriptor*>& matches,
bool includeUnfinishedIndexes) const {
- IndexIterator ii = getIndexIterator(txn, includeUnfinishedIndexes);
+ IndexIterator ii = getIndexIterator(opCtx, includeUnfinishedIndexes);
while (ii.more()) {
IndexDescriptor* desc = ii.next();
if (IndexNames::findPluginName(desc->keyPattern()) == type) {
@@ -1213,13 +1215,13 @@ const IndexCatalogEntry* IndexCatalog::getEntry(const IndexDescriptor* desc) con
}
-const IndexDescriptor* IndexCatalog::refreshEntry(OperationContext* txn,
+const IndexDescriptor* IndexCatalog::refreshEntry(OperationContext* opCtx,
const IndexDescriptor* oldDesc) {
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
invariant(!BackgroundOperation::inProgForNs(_collection->ns()));
const std::string indexName = oldDesc->indexName();
- invariant(_collection->getCatalogEntry()->isIndexReady(txn, indexName));
+ invariant(_collection->getCatalogEntry()->isIndexReady(opCtx, indexName));
// Notify other users of the IndexCatalog that we're about to invalidate 'oldDesc'.
const bool collectionGoingAway = false;
@@ -1229,19 +1231,19 @@ const IndexDescriptor* IndexCatalog::refreshEntry(OperationContext* txn,
// Delete the IndexCatalogEntry that owns this descriptor. After deletion, 'oldDesc' is
// invalid and should not be dereferenced.
IndexCatalogEntry* oldEntry = _entries.release(oldDesc);
- txn->recoveryUnit()->registerChange(
- new IndexRemoveChange(txn, _collection, &_entries, oldEntry));
+ opCtx->recoveryUnit()->registerChange(
+ new IndexRemoveChange(opCtx, _collection, &_entries, oldEntry));
// Ask the CollectionCatalogEntry for the new index spec.
- BSONObj spec = _collection->getCatalogEntry()->getIndexSpec(txn, indexName).getOwned();
+ BSONObj spec = _collection->getCatalogEntry()->getIndexSpec(opCtx, indexName).getOwned();
BSONObj keyPattern = spec.getObjectField("key");
// Re-register this index in the index catalog with the new spec.
IndexDescriptor* newDesc =
- new IndexDescriptor(_collection, _getAccessMethodName(txn, keyPattern), spec);
+ new IndexDescriptor(_collection, _getAccessMethodName(opCtx, keyPattern), spec);
const bool initFromDisk = false;
- const IndexCatalogEntry* newEntry = _setupInMemoryStructures(txn, newDesc, initFromDisk);
- invariant(newEntry->isReady(txn));
+ const IndexCatalogEntry* newEntry = _setupInMemoryStructures(opCtx, newDesc, initFromDisk);
+ invariant(newEntry->isReady(opCtx));
// Return the new descriptor.
return newEntry->descriptor();
@@ -1249,18 +1251,18 @@ const IndexDescriptor* IndexCatalog::refreshEntry(OperationContext* txn,
// ---------------------------
-Status IndexCatalog::_indexFilteredRecords(OperationContext* txn,
+Status IndexCatalog::_indexFilteredRecords(OperationContext* opCtx,
IndexCatalogEntry* index,
const std::vector<BsonRecord>& bsonRecords,
int64_t* keysInsertedOut) {
InsertDeleteOptions options;
- prepareInsertDeleteOptions(txn, index->descriptor(), &options);
+ prepareInsertDeleteOptions(opCtx, index->descriptor(), &options);
for (auto bsonRecord : bsonRecords) {
int64_t inserted;
invariant(bsonRecord.id != RecordId());
Status status = index->accessMethod()->insert(
- txn, *bsonRecord.docPtr, bsonRecord.id, options, &inserted);
+ opCtx, *bsonRecord.docPtr, bsonRecord.id, options, &inserted);
if (!status.isOK())
return status;
@@ -1271,13 +1273,13 @@ Status IndexCatalog::_indexFilteredRecords(OperationContext* txn,
return Status::OK();
}
-Status IndexCatalog::_indexRecords(OperationContext* txn,
+Status IndexCatalog::_indexRecords(OperationContext* opCtx,
IndexCatalogEntry* index,
const std::vector<BsonRecord>& bsonRecords,
int64_t* keysInsertedOut) {
const MatchExpression* filter = index->getFilterExpression();
if (!filter)
- return _indexFilteredRecords(txn, index, bsonRecords, keysInsertedOut);
+ return _indexFilteredRecords(opCtx, index, bsonRecords, keysInsertedOut);
std::vector<BsonRecord> filteredBsonRecords;
for (auto bsonRecord : bsonRecords) {
@@ -1285,26 +1287,26 @@ Status IndexCatalog::_indexRecords(OperationContext* txn,
filteredBsonRecords.push_back(bsonRecord);
}
- return _indexFilteredRecords(txn, index, filteredBsonRecords, keysInsertedOut);
+ return _indexFilteredRecords(opCtx, index, filteredBsonRecords, keysInsertedOut);
}
-Status IndexCatalog::_unindexRecord(OperationContext* txn,
+Status IndexCatalog::_unindexRecord(OperationContext* opCtx,
IndexCatalogEntry* index,
const BSONObj& obj,
const RecordId& loc,
bool logIfError,
int64_t* keysDeletedOut) {
InsertDeleteOptions options;
- prepareInsertDeleteOptions(txn, index->descriptor(), &options);
+ prepareInsertDeleteOptions(opCtx, index->descriptor(), &options);
options.logIfError = logIfError;
// For unindex operations, dupsAllowed=false really means that it is safe to delete anything
// that matches the key, without checking the RecordID, since dups are impossible. We need
// to disable this behavior for in-progress indexes. See SERVER-17487 for more details.
- options.dupsAllowed = options.dupsAllowed || !index->isReady(txn);
+ options.dupsAllowed = options.dupsAllowed || !index->isReady(opCtx);
int64_t removed;
- Status status = index->accessMethod()->remove(txn, obj, loc, options, &removed);
+ Status status = index->accessMethod()->remove(opCtx, obj, loc, options, &removed);
if (!status.isOK()) {
log() << "Couldn't unindex record " << redact(obj) << " from collection "
@@ -1319,7 +1321,7 @@ Status IndexCatalog::_unindexRecord(OperationContext* txn,
}
-Status IndexCatalog::indexRecords(OperationContext* txn,
+Status IndexCatalog::indexRecords(OperationContext* opCtx,
const std::vector<BsonRecord>& bsonRecords,
int64_t* keysInsertedOut) {
if (keysInsertedOut) {
@@ -1328,7 +1330,7 @@ Status IndexCatalog::indexRecords(OperationContext* txn,
for (IndexCatalogEntryContainer::const_iterator i = _entries.begin(); i != _entries.end();
++i) {
- Status s = _indexRecords(txn, *i, bsonRecords, keysInsertedOut);
+ Status s = _indexRecords(opCtx, *i, bsonRecords, keysInsertedOut);
if (!s.isOK())
return s;
}
@@ -1336,7 +1338,7 @@ Status IndexCatalog::indexRecords(OperationContext* txn,
return Status::OK();
}
-void IndexCatalog::unindexRecord(OperationContext* txn,
+void IndexCatalog::unindexRecord(OperationContext* opCtx,
const BSONObj& obj,
const RecordId& loc,
bool noWarn,
@@ -1350,8 +1352,8 @@ void IndexCatalog::unindexRecord(OperationContext* txn,
IndexCatalogEntry* entry = *i;
// If it's a background index, we DO NOT want to log anything.
- bool logIfError = entry->isReady(txn) ? !noWarn : false;
- _unindexRecord(txn, entry, obj, loc, logIfError, keysDeletedOut);
+ bool logIfError = entry->isReady(opCtx) ? !noWarn : false;
+ _unindexRecord(opCtx, entry, obj, loc, logIfError, keysDeletedOut);
}
}
@@ -1365,11 +1367,11 @@ BSONObj IndexCatalog::fixIndexKey(const BSONObj& key) {
return key;
}
-void IndexCatalog::prepareInsertDeleteOptions(OperationContext* txn,
+void IndexCatalog::prepareInsertDeleteOptions(OperationContext* opCtx,
const IndexDescriptor* desc,
InsertDeleteOptions* options) {
- auto replCoord = repl::ReplicationCoordinator::get(txn);
- if (replCoord->shouldRelaxIndexConstraints(txn, NamespaceString(desc->parentNS()))) {
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
+ if (replCoord->shouldRelaxIndexConstraints(opCtx, NamespaceString(desc->parentNS()))) {
options->getKeysMode = IndexAccessMethod::GetKeysMode::kRelaxConstraints;
} else {
options->getKeysMode = IndexAccessMethod::GetKeysMode::kEnforceConstraints;
@@ -1384,7 +1386,7 @@ void IndexCatalog::prepareInsertDeleteOptions(OperationContext* txn,
}
}
-StatusWith<BSONObj> IndexCatalog::_fixIndexSpec(OperationContext* txn,
+StatusWith<BSONObj> IndexCatalog::_fixIndexSpec(OperationContext* opCtx,
Collection* collection,
const BSONObj& spec) {
auto statusWithSpec = IndexLegacy::adjustIndexSpecObject(spec);
diff --git a/src/mongo/db/catalog/index_catalog.h b/src/mongo/db/catalog/index_catalog.h
index 4016b7860d4..469b86f04a8 100644
--- a/src/mongo/db/catalog/index_catalog.h
+++ b/src/mongo/db/catalog/index_catalog.h
@@ -60,17 +60,17 @@ public:
~IndexCatalog();
// must be called before used
- Status init(OperationContext* txn);
+ Status init(OperationContext* opCtx);
bool ok() const;
// ---- accessors -----
bool haveAnyIndexes() const;
- int numIndexesTotal(OperationContext* txn) const;
- int numIndexesReady(OperationContext* txn) const;
- int numIndexesInProgress(OperationContext* txn) const {
- return numIndexesTotal(txn) - numIndexesReady(txn);
+ int numIndexesTotal(OperationContext* opCtx) const;
+ int numIndexesReady(OperationContext* opCtx) const;
+ int numIndexesInProgress(OperationContext* opCtx) const {
+ return numIndexesTotal(opCtx) - numIndexesReady(opCtx);
}
/**
@@ -78,7 +78,7 @@ public:
* in which case everything from this tree has to go away
*/
- bool haveIdIndex(OperationContext* txn) const;
+ bool haveIdIndex(OperationContext* opCtx) const;
/**
* Returns the spec for the id index to create by default for this collection.
@@ -86,14 +86,14 @@ public:
BSONObj getDefaultIdIndexSpec(
ServerGlobalParams::FeatureCompatibility::Version featureCompatibilityVersion) const;
- IndexDescriptor* findIdIndex(OperationContext* txn) const;
+ IndexDescriptor* findIdIndex(OperationContext* opCtx) const;
/**
* Find index by name. The index name uniquely identifies an index.
*
* @return null if cannot find
*/
- IndexDescriptor* findIndexByName(OperationContext* txn,
+ IndexDescriptor* findIndexByName(OperationContext* opCtx,
StringData name,
bool includeUnfinishedIndexes = false) const;
@@ -108,7 +108,7 @@ public:
* collation.
*/
IndexDescriptor* findIndexByKeyPatternAndCollationSpec(
- OperationContext* txn,
+ OperationContext* opCtx,
const BSONObj& key,
const BSONObj& collationSpec,
bool includeUnfinishedIndexes = false) const;
@@ -119,7 +119,7 @@ public:
*
* Consider using 'findIndexByName' if expecting to match one index.
*/
- void findIndexesByKeyPattern(OperationContext* txn,
+ void findIndexesByKeyPattern(OperationContext* opCtx,
const BSONObj& key,
bool includeUnfinishedIndexes,
std::vector<IndexDescriptor*>* matches) const;
@@ -137,11 +137,11 @@ public:
*
* If no such index exists, returns NULL.
*/
- IndexDescriptor* findShardKeyPrefixedIndex(OperationContext* txn,
+ IndexDescriptor* findShardKeyPrefixedIndex(OperationContext* opCtx,
const BSONObj& shardKey,
bool requireSingleKey) const;
- void findIndexByType(OperationContext* txn,
+ void findIndexByType(OperationContext* opCtx,
const std::string& type,
std::vector<IndexDescriptor*>& matches,
bool includeUnfinishedIndexes = false) const;
@@ -158,7 +158,7 @@ public:
* an invalidateAll() on the cursor manager to notify other users of the IndexCatalog that
* this descriptor is now invalid.
*/
- const IndexDescriptor* refreshEntry(OperationContext* txn, const IndexDescriptor* oldDesc);
+ const IndexDescriptor* refreshEntry(OperationContext* opCtx, const IndexDescriptor* oldDesc);
// never returns NULL
const IndexCatalogEntry* getEntry(const IndexDescriptor* desc) const;
@@ -184,7 +184,7 @@ public:
IndexCatalogEntry* catalogEntry(const IndexDescriptor* desc);
private:
- IndexIterator(OperationContext* txn,
+ IndexIterator(OperationContext* opCtx,
const IndexCatalog* cat,
bool includeUnfinishedIndexes);
@@ -192,7 +192,7 @@ public:
bool _includeUnfinishedIndexes;
- OperationContext* const _txn;
+ OperationContext* const _opCtx;
const IndexCatalog* _catalog;
IndexCatalogEntryContainer::const_iterator _iterator;
@@ -204,8 +204,8 @@ public:
friend class IndexCatalog;
};
- IndexIterator getIndexIterator(OperationContext* txn, bool includeUnfinishedIndexes) const {
- return IndexIterator(txn, this, includeUnfinishedIndexes);
+ IndexIterator getIndexIterator(OperationContext* opCtx, bool includeUnfinishedIndexes) const {
+ return IndexIterator(opCtx, this, includeUnfinishedIndexes);
};
// ---- index set modifiers ------
@@ -215,19 +215,20 @@ public:
* empty collection can be rolled back as part of a larger WUOW. Returns the full specification
* of the created index, as it is stored in this index catalog.
*/
- StatusWith<BSONObj> createIndexOnEmptyCollection(OperationContext* txn, BSONObj spec);
+ StatusWith<BSONObj> createIndexOnEmptyCollection(OperationContext* opCtx, BSONObj spec);
- StatusWith<BSONObj> prepareSpecForCreate(OperationContext* txn, const BSONObj& original) const;
+ StatusWith<BSONObj> prepareSpecForCreate(OperationContext* opCtx,
+ const BSONObj& original) const;
- Status dropAllIndexes(OperationContext* txn, bool includingIdIndex);
+ Status dropAllIndexes(OperationContext* opCtx, bool includingIdIndex);
- Status dropIndex(OperationContext* txn, IndexDescriptor* desc);
+ Status dropIndex(OperationContext* opCtx, IndexDescriptor* desc);
/**
* will drop all incompleted indexes and return specs
* after this, the indexes can be rebuilt
*/
- std::vector<BSONObj> getAndClearUnfinishedIndexes(OperationContext* txn);
+ std::vector<BSONObj> getAndClearUnfinishedIndexes(OperationContext* opCtx);
struct IndexKillCriteria {
@@ -241,7 +242,7 @@ public:
/**
* Returns true if the index 'idx' is multikey, and returns false otherwise.
*/
- bool isMultikey(OperationContext* txn, const IndexDescriptor* idx);
+ bool isMultikey(OperationContext* opCtx, const IndexDescriptor* idx);
/**
* Returns the path components that cause the index 'idx' to be multikey if the index supports
@@ -252,7 +253,7 @@ public:
* returns a vector with size equal to the number of elements in the index key pattern where
* each element in the vector is an empty set.
*/
- MultikeyPaths getMultikeyPaths(OperationContext* txn, const IndexDescriptor* idx);
+ MultikeyPaths getMultikeyPaths(OperationContext* opCtx, const IndexDescriptor* idx);
// --- these probably become private?
@@ -270,7 +271,7 @@ public:
MONGO_DISALLOW_COPYING(IndexBuildBlock);
public:
- IndexBuildBlock(OperationContext* txn, Collection* collection, const BSONObj& spec);
+ IndexBuildBlock(OperationContext* opCtx, Collection* collection, const BSONObj& spec);
~IndexBuildBlock();
@@ -300,7 +301,7 @@ public:
IndexCatalogEntry* _entry;
bool _inProgress;
- OperationContext* _txn;
+ OperationContext* _opCtx;
};
// ----- data modifiers ------
@@ -311,7 +312,7 @@ public:
*
* This method may throw.
*/
- Status indexRecords(OperationContext* txn,
+ Status indexRecords(OperationContext* opCtx,
const std::vector<BsonRecord>& bsonRecords,
int64_t* keysInsertedOut);
@@ -319,7 +320,7 @@ public:
* When 'keysDeletedOut' is not null, it will be set to the number of index keys removed by
* this operation.
*/
- void unindexRecord(OperationContext* txn,
+ void unindexRecord(OperationContext* opCtx,
const BSONObj& obj,
const RecordId& loc,
bool noWarn,
@@ -327,11 +328,11 @@ public:
// ------- temp internal -------
- std::string getAccessMethodName(OperationContext* txn, const BSONObj& keyPattern) {
- return _getAccessMethodName(txn, keyPattern);
+ std::string getAccessMethodName(OperationContext* opCtx, const BSONObj& keyPattern) {
+ return _getAccessMethodName(opCtx, keyPattern);
}
- Status _upgradeDatabaseMinorVersionIfNeeded(OperationContext* txn,
+ Status _upgradeDatabaseMinorVersionIfNeeded(OperationContext* opCtx,
const std::string& newPluginName);
// public static helpers
@@ -342,35 +343,35 @@ public:
* Fills out 'options' in order to indicate whether to allow dups or relax
* index constraints, as needed by replication.
*/
- static void prepareInsertDeleteOptions(OperationContext* txn,
+ static void prepareInsertDeleteOptions(OperationContext* opCtx,
const IndexDescriptor* desc,
InsertDeleteOptions* options);
private:
static const BSONObj _idObj; // { _id : 1 }
- bool _shouldOverridePlugin(OperationContext* txn, const BSONObj& keyPattern) const;
+ bool _shouldOverridePlugin(OperationContext* opCtx, const BSONObj& keyPattern) const;
/**
* This differs from IndexNames::findPluginName in that returns the plugin name we *should*
* use, not the plugin name inside of the provided key pattern. To understand when these
* differ, see shouldOverridePlugin.
*/
- std::string _getAccessMethodName(OperationContext* txn, const BSONObj& keyPattern) const;
+ std::string _getAccessMethodName(OperationContext* opCtx, const BSONObj& keyPattern) const;
void _checkMagic() const;
- Status _indexFilteredRecords(OperationContext* txn,
+ Status _indexFilteredRecords(OperationContext* opCtx,
IndexCatalogEntry* index,
const std::vector<BsonRecord>& bsonRecords,
int64_t* keysInsertedOut);
- Status _indexRecords(OperationContext* txn,
+ Status _indexRecords(OperationContext* opCtx,
IndexCatalogEntry* index,
const std::vector<BsonRecord>& bsonRecords,
int64_t* keysInsertedOut);
- Status _unindexRecord(OperationContext* txn,
+ Status _unindexRecord(OperationContext* opCtx,
IndexCatalogEntry* index,
const BSONObj& obj,
const RecordId& loc,
@@ -380,18 +381,18 @@ private:
/**
* this does no sanity checks
*/
- Status _dropIndex(OperationContext* txn, IndexCatalogEntry* entry);
+ Status _dropIndex(OperationContext* opCtx, IndexCatalogEntry* entry);
// just does disk hanges
// doesn't change memory state, etc...
- void _deleteIndexFromDisk(OperationContext* txn,
+ void _deleteIndexFromDisk(OperationContext* opCtx,
const std::string& indexName,
const std::string& indexNamespace);
// descriptor ownership passes to _setupInMemoryStructures
// initFromDisk: Avoids registering a change to undo this operation when set to true.
// You must set this flag if calling this function outside of a UnitOfWork.
- IndexCatalogEntry* _setupInMemoryStructures(OperationContext* txn,
+ IndexCatalogEntry* _setupInMemoryStructures(OperationContext* opCtx,
IndexDescriptor* descriptor,
bool initFromDisk);
@@ -399,13 +400,13 @@ private:
// conform to the standard for insertion. This function adds the 'v' field if it didn't
// exist, removes the '_id' field if it exists, applies plugin-level transformations if
// appropriate, etc.
- static StatusWith<BSONObj> _fixIndexSpec(OperationContext* txn,
+ static StatusWith<BSONObj> _fixIndexSpec(OperationContext* opCtx,
Collection* collection,
const BSONObj& spec);
- Status _isSpecOk(OperationContext* txn, const BSONObj& spec) const;
+ Status _isSpecOk(OperationContext* opCtx, const BSONObj& spec) const;
- Status _doesSpecConflictWithExisting(OperationContext* txn, const BSONObj& spec) const;
+ Status _doesSpecConflictWithExisting(OperationContext* opCtx, const BSONObj& spec) const;
int _magic;
Collection* const _collection;
diff --git a/src/mongo/db/catalog/index_catalog_entry.cpp b/src/mongo/db/catalog/index_catalog_entry.cpp
index c67b29f8559..7400536bc8b 100644
--- a/src/mongo/db/catalog/index_catalog_entry.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry.cpp
@@ -59,12 +59,12 @@ public:
HeadManagerImpl(IndexCatalogEntry* ice) : _catalogEntry(ice) {}
virtual ~HeadManagerImpl() {}
- const RecordId getHead(OperationContext* txn) const {
- return _catalogEntry->head(txn);
+ const RecordId getHead(OperationContext* opCtx) const {
+ return _catalogEntry->head(opCtx);
}
- void setHead(OperationContext* txn, const RecordId newHead) {
- _catalogEntry->setHead(txn, newHead);
+ void setHead(OperationContext* opCtx, const RecordId newHead) {
+ _catalogEntry->setHead(opCtx, newHead);
}
private:
@@ -72,7 +72,7 @@ private:
IndexCatalogEntry* _catalogEntry;
};
-IndexCatalogEntry::IndexCatalogEntry(OperationContext* txn,
+IndexCatalogEntry::IndexCatalogEntry(OperationContext* opCtx,
StringData ns,
CollectionCatalogEntry* collection,
IndexDescriptor* descriptor,
@@ -86,12 +86,12 @@ IndexCatalogEntry::IndexCatalogEntry(OperationContext* txn,
_isReady(false) {
_descriptor->_cachedEntry = this;
- _isReady = _catalogIsReady(txn);
- _head = _catalogHead(txn);
+ _isReady = _catalogIsReady(opCtx);
+ _head = _catalogHead(opCtx);
{
stdx::lock_guard<stdx::mutex> lk(_indexMultikeyPathsMutex);
- _isMultikey.store(_catalogIsMultikey(txn, &_indexMultikeyPaths));
+ _isMultikey.store(_catalogIsMultikey(opCtx, &_indexMultikeyPaths));
_indexTracksPathLevelMultikeyInfo = !_indexMultikeyPaths.empty();
}
@@ -99,7 +99,7 @@ IndexCatalogEntry::IndexCatalogEntry(OperationContext* txn,
invariant(collationElement.isABSONObj());
BSONObj collation = collationElement.Obj();
auto statusWithCollator =
- CollatorFactoryInterface::get(txn->getServiceContext())->makeFromBSON(collation);
+ CollatorFactoryInterface::get(opCtx->getServiceContext())->makeFromBSON(collation);
// Index spec should have already been validated.
invariantOK(statusWithCollator.getStatus());
@@ -132,13 +132,13 @@ void IndexCatalogEntry::init(std::unique_ptr<IndexAccessMethod> accessMethod) {
_accessMethod = std::move(accessMethod);
}
-const RecordId& IndexCatalogEntry::head(OperationContext* txn) const {
- DEV invariant(_head == _catalogHead(txn));
+const RecordId& IndexCatalogEntry::head(OperationContext* opCtx) const {
+ DEV invariant(_head == _catalogHead(opCtx));
return _head;
}
-bool IndexCatalogEntry::isReady(OperationContext* txn) const {
- DEV invariant(_isReady == _catalogIsReady(txn));
+bool IndexCatalogEntry::isReady(OperationContext* opCtx) const {
+ DEV invariant(_isReady == _catalogIsReady(opCtx));
return _isReady;
}
@@ -146,7 +146,7 @@ bool IndexCatalogEntry::isMultikey() const {
return _isMultikey.load();
}
-MultikeyPaths IndexCatalogEntry::getMultikeyPaths(OperationContext* txn) const {
+MultikeyPaths IndexCatalogEntry::getMultikeyPaths(OperationContext* opCtx) const {
stdx::lock_guard<stdx::mutex> lk(_indexMultikeyPathsMutex);
return _indexMultikeyPaths;
}
@@ -170,10 +170,10 @@ public:
const RecordId _oldHead;
};
-void IndexCatalogEntry::setHead(OperationContext* txn, RecordId newHead) {
- _collection->setIndexHead(txn, _descriptor->indexName(), newHead);
+void IndexCatalogEntry::setHead(OperationContext* opCtx, RecordId newHead) {
+ _collection->setIndexHead(opCtx, _descriptor->indexName(), newHead);
- txn->recoveryUnit()->registerChange(new SetHeadChange(this, _head));
+ opCtx->recoveryUnit()->registerChange(new SetHeadChange(this, _head));
_head = newHead;
}
@@ -185,21 +185,21 @@ void IndexCatalogEntry::setHead(OperationContext* txn, RecordId newHead) {
*/
class RecoveryUnitSwap {
public:
- RecoveryUnitSwap(OperationContext* txn, RecoveryUnit* newRecoveryUnit)
- : _txn(txn),
- _oldRecoveryUnit(_txn->releaseRecoveryUnit()),
+ RecoveryUnitSwap(OperationContext* opCtx, RecoveryUnit* newRecoveryUnit)
+ : _opCtx(opCtx),
+ _oldRecoveryUnit(_opCtx->releaseRecoveryUnit()),
_oldRecoveryUnitState(
- _txn->setRecoveryUnit(newRecoveryUnit, OperationContext::kNotInUnitOfWork)),
+ _opCtx->setRecoveryUnit(newRecoveryUnit, OperationContext::kNotInUnitOfWork)),
_newRecoveryUnit(newRecoveryUnit) {}
~RecoveryUnitSwap() {
- _txn->releaseRecoveryUnit();
- _txn->setRecoveryUnit(_oldRecoveryUnit, _oldRecoveryUnitState);
+ _opCtx->releaseRecoveryUnit();
+ _opCtx->setRecoveryUnit(_oldRecoveryUnit, _oldRecoveryUnitState);
}
private:
// Not owned
- OperationContext* const _txn;
+ OperationContext* const _opCtx;
// Owned, but life-time is not controlled
RecoveryUnit* const _oldRecoveryUnit;
@@ -209,7 +209,7 @@ private:
const std::unique_ptr<RecoveryUnit> _newRecoveryUnit;
};
-void IndexCatalogEntry::setMultikey(OperationContext* txn, const MultikeyPaths& multikeyPaths) {
+void IndexCatalogEntry::setMultikey(OperationContext* opCtx, const MultikeyPaths& multikeyPaths) {
if (!_indexTracksPathLevelMultikeyInfo && isMultikey()) {
// If the index is already set as multikey and we don't have any path-level information to
// update, then there's nothing more for us to do.
@@ -243,7 +243,8 @@ void IndexCatalogEntry::setMultikey(OperationContext* txn, const MultikeyPaths&
{
// Only one thread should set the multi-key value per collection, because the metadata for a
// collection is one large document.
- Lock::ResourceLock collMDLock(txn->lockState(), ResourceId(RESOURCE_METADATA, _ns), MODE_X);
+ Lock::ResourceLock collMDLock(
+ opCtx->lockState(), ResourceId(RESOURCE_METADATA, _ns), MODE_X);
if (!_indexTracksPathLevelMultikeyInfo && isMultikey()) {
// It's possible that we raced with another thread when acquiring the MD lock. If the
@@ -257,9 +258,9 @@ void IndexCatalogEntry::setMultikey(OperationContext* txn, const MultikeyPaths&
// snapshot isolation.
{
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- RecoveryUnitSwap ruSwap(txn, storageEngine->newRecoveryUnit());
+ RecoveryUnitSwap ruSwap(opCtx, storageEngine->newRecoveryUnit());
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
// It's possible that the index type (e.g. ascending/descending index) supports tracking
// path-level multikey information, but this particular index doesn't.
@@ -267,7 +268,7 @@ void IndexCatalogEntry::setMultikey(OperationContext* txn, const MultikeyPaths&
// multikey information in order to avoid unintentionally setting path-level multikey
// information on an index created before 3.4.
if (_collection->setIndexIsMultikey(
- txn,
+ opCtx,
_descriptor->indexName(),
_indexTracksPathLevelMultikeyInfo ? multikeyPaths : MultikeyPaths{})) {
if (_infoCache) {
@@ -293,17 +294,17 @@ void IndexCatalogEntry::setMultikey(OperationContext* txn, const MultikeyPaths&
// ----
-bool IndexCatalogEntry::_catalogIsReady(OperationContext* txn) const {
- return _collection->isIndexReady(txn, _descriptor->indexName());
+bool IndexCatalogEntry::_catalogIsReady(OperationContext* opCtx) const {
+ return _collection->isIndexReady(opCtx, _descriptor->indexName());
}
-RecordId IndexCatalogEntry::_catalogHead(OperationContext* txn) const {
- return _collection->getIndexHead(txn, _descriptor->indexName());
+RecordId IndexCatalogEntry::_catalogHead(OperationContext* opCtx) const {
+ return _collection->getIndexHead(opCtx, _descriptor->indexName());
}
-bool IndexCatalogEntry::_catalogIsMultikey(OperationContext* txn,
+bool IndexCatalogEntry::_catalogIsMultikey(OperationContext* opCtx,
MultikeyPaths* multikeyPaths) const {
- return _collection->isIndexMultikey(txn, _descriptor->indexName(), multikeyPaths);
+ return _collection->isIndexMultikey(opCtx, _descriptor->indexName(), multikeyPaths);
}
// ------------------
diff --git a/src/mongo/db/catalog/index_catalog_entry.h b/src/mongo/db/catalog/index_catalog_entry.h
index 64f72822404..065d1a544ac 100644
--- a/src/mongo/db/catalog/index_catalog_entry.h
+++ b/src/mongo/db/catalog/index_catalog_entry.h
@@ -56,7 +56,7 @@ class IndexCatalogEntry {
MONGO_DISALLOW_COPYING(IndexCatalogEntry);
public:
- IndexCatalogEntry(OperationContext* txn,
+ IndexCatalogEntry(OperationContext* opCtx,
StringData ns,
CollectionCatalogEntry* collection, // not owned
IndexDescriptor* descriptor, // ownership passes to me
@@ -98,9 +98,9 @@ public:
/// ---------------------
- const RecordId& head(OperationContext* txn) const;
+ const RecordId& head(OperationContext* opCtx) const;
- void setHead(OperationContext* txn, RecordId newHead);
+ void setHead(OperationContext* opCtx, RecordId newHead);
void setIsReady(bool newIsReady);
@@ -124,7 +124,7 @@ public:
* returns a vector with size equal to the number of elements in the index key pattern where
* each element in the vector is an empty set.
*/
- MultikeyPaths getMultikeyPaths(OperationContext* txn) const;
+ MultikeyPaths getMultikeyPaths(OperationContext* opCtx) const;
/**
* Sets this index to be multikey. Information regarding which newly detected path components
@@ -136,10 +136,10 @@ public:
* with size equal to the number of elements in the index key pattern. Additionally, at least
* one path component of the indexed fields must cause this index to be multikey.
*/
- void setMultikey(OperationContext* txn, const MultikeyPaths& multikeyPaths);
+ void setMultikey(OperationContext* opCtx, const MultikeyPaths& multikeyPaths);
// if this ready is ready for queries
- bool isReady(OperationContext* txn) const;
+ bool isReady(OperationContext* opCtx) const;
/**
* If return value is not boost::none, reads with majority read concern using an older snapshot
@@ -157,15 +157,15 @@ private:
class SetMultikeyChange;
class SetHeadChange;
- bool _catalogIsReady(OperationContext* txn) const;
- RecordId _catalogHead(OperationContext* txn) const;
+ bool _catalogIsReady(OperationContext* opCtx) const;
+ RecordId _catalogHead(OperationContext* opCtx) const;
/**
* Retrieves the multikey information associated with this index from '_collection',
*
* See CollectionCatalogEntry::isIndexMultikey() for more details.
*/
- bool _catalogIsMultikey(OperationContext* txn, MultikeyPaths* multikeyPaths) const;
+ bool _catalogIsMultikey(OperationContext* opCtx, MultikeyPaths* multikeyPaths) const;
// -----
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index d7c86f27163..622706776f5 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -124,9 +124,9 @@ private:
MultiIndexBlock* const _indexer;
};
-MultiIndexBlock::MultiIndexBlock(OperationContext* txn, Collection* collection)
+MultiIndexBlock::MultiIndexBlock(OperationContext* opCtx, Collection* collection)
: _collection(collection),
- _txn(txn),
+ _opCtx(opCtx),
_buildInBackground(false),
_allowInterruption(false),
_ignoreUnique(false),
@@ -137,7 +137,7 @@ MultiIndexBlock::~MultiIndexBlock() {
return;
while (true) {
try {
- WriteUnitOfWork wunit(_txn);
+ WriteUnitOfWork wunit(_opCtx);
// This cleans up all index builds.
// Because that may need to write, it is done inside
// of a WUOW. Nothing inside this block can fail, and it is made fatal if it does.
@@ -164,7 +164,7 @@ MultiIndexBlock::~MultiIndexBlock() {
void MultiIndexBlock::removeExistingIndexes(std::vector<BSONObj>* specs) const {
for (size_t i = 0; i < specs->size(); i++) {
Status status =
- _collection->getIndexCatalog()->prepareSpecForCreate(_txn, (*specs)[i]).getStatus();
+ _collection->getIndexCatalog()->prepareSpecForCreate(_opCtx, (*specs)[i]).getStatus();
if (status.code() == ErrorCodes::IndexAlreadyExists) {
specs->erase(specs->begin() + i);
i--;
@@ -179,10 +179,10 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(const BSONObj& spec) {
}
StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(const std::vector<BSONObj>& indexSpecs) {
- WriteUnitOfWork wunit(_txn);
+ WriteUnitOfWork wunit(_opCtx);
invariant(_indexes.empty());
- _txn->recoveryUnit()->registerChange(new CleanupIndexesVectorOnRollback(this));
+ _opCtx->recoveryUnit()->registerChange(new CleanupIndexesVectorOnRollback(this));
const string& ns = _collection->ns().ns();
@@ -199,7 +199,7 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(const std::vector<BSONObj
string pluginName = IndexNames::findPluginName(info["key"].Obj());
if (pluginName.size()) {
Status s = _collection->getIndexCatalog()->_upgradeDatabaseMinorVersionIfNeeded(
- _txn, pluginName);
+ _opCtx, pluginName);
if (!s.isOK())
return s;
}
@@ -220,7 +220,7 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(const std::vector<BSONObj
for (size_t i = 0; i < indexSpecs.size(); i++) {
BSONObj info = indexSpecs[i];
StatusWith<BSONObj> statusWithInfo =
- _collection->getIndexCatalog()->prepareSpecForCreate(_txn, info);
+ _collection->getIndexCatalog()->prepareSpecForCreate(_opCtx, info);
Status status = statusWithInfo.getStatus();
if (!status.isOK())
return status;
@@ -228,13 +228,13 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(const std::vector<BSONObj
indexInfoObjs.push_back(info);
IndexToBuild index;
- index.block.reset(new IndexCatalog::IndexBuildBlock(_txn, _collection, info));
+ index.block.reset(new IndexCatalog::IndexBuildBlock(_opCtx, _collection, info));
status = index.block->init();
if (!status.isOK())
return status;
index.real = index.block->getEntry()->accessMethod();
- status = index.real->initializeAsEmpty(_txn);
+ status = index.real->initializeAsEmpty(_opCtx);
if (!status.isOK())
return status;
@@ -246,7 +246,7 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(const std::vector<BSONObj
const IndexDescriptor* descriptor = index.block->getEntry()->descriptor();
- IndexCatalog::prepareInsertDeleteOptions(_txn, descriptor, &index.options);
+ IndexCatalog::prepareInsertDeleteOptions(_opCtx, descriptor, &index.options);
index.options.dupsAllowed = index.options.dupsAllowed || _ignoreUnique;
if (_ignoreUnique) {
index.options.getKeysMode = IndexAccessMethod::GetKeysMode::kRelaxConstraints;
@@ -260,7 +260,7 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(const std::vector<BSONObj
index.filterExpression = index.block->getEntry()->getFilterExpression();
// TODO SERVER-14888 Suppress this in cases we don't want to audit.
- audit::logCreateIndex(_txn->getClient(), &info, descriptor->indexName(), ns);
+ audit::logCreateIndex(_opCtx->getClient(), &info, descriptor->indexName(), ns);
_indexes.push_back(std::move(index));
}
@@ -274,8 +274,8 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(const std::vector<BSONObj
log() << "Index build interrupted due to 'crashAfterStartingIndexBuild' failpoint. Exiting "
"after waiting for changes to become durable.";
Locker::LockSnapshot lockInfo;
- _txn->lockState()->saveLockStateAndUnlock(&lockInfo);
- if (_txn->recoveryUnit()->waitUntilDurable()) {
+ _opCtx->lockState()->saveLockStateAndUnlock(&lockInfo);
+ if (_opCtx->recoveryUnit()->waitUntilDurable()) {
quickExit(EXIT_TEST);
}
}
@@ -285,9 +285,10 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(const std::vector<BSONObj
Status MultiIndexBlock::insertAllDocumentsInCollection(std::set<RecordId>* dupsOut) {
const char* curopMessage = _buildInBackground ? "Index Build (background)" : "Index Build";
- const auto numRecords = _collection->numRecords(_txn);
- stdx::unique_lock<Client> lk(*_txn->getClient());
- ProgressMeterHolder progress(*_txn->setMessage_inlock(curopMessage, curopMessage, numRecords));
+ const auto numRecords = _collection->numRecords(_opCtx);
+ stdx::unique_lock<Client> lk(*_opCtx->getClient());
+ ProgressMeterHolder progress(
+ *_opCtx->setMessage_inlock(curopMessage, curopMessage, numRecords));
lk.unlock();
Timer t;
@@ -295,7 +296,7 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(std::set<RecordId>* dupsO
unsigned long long n = 0;
unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(
- _txn, _collection->ns().ns(), _collection, PlanExecutor::YIELD_MANUAL));
+ _opCtx, _collection->ns().ns(), _collection, PlanExecutor::YIELD_MANUAL));
if (_buildInBackground) {
invariant(_allowInterruption);
exec->setYieldPolicy(PlanExecutor::YIELD_AUTO, _collection);
@@ -311,20 +312,20 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(std::set<RecordId>* dupsO
(PlanExecutor::ADVANCED == (state = exec->getNextSnapshotted(&objToIndex, &loc)))) {
try {
if (_allowInterruption)
- _txn->checkForInterrupt();
+ _opCtx->checkForInterrupt();
// Make sure we are working with the latest version of the document.
- if (objToIndex.snapshotId() != _txn->recoveryUnit()->getSnapshotId() &&
- !_collection->findDoc(_txn, loc, &objToIndex)) {
+ if (objToIndex.snapshotId() != _opCtx->recoveryUnit()->getSnapshotId() &&
+ !_collection->findDoc(_opCtx, loc, &objToIndex)) {
// doc was deleted so don't index it.
retries = 0;
continue;
}
// Done before insert so we can retry document if it WCEs.
- progress->setTotalWhileRunning(_collection->numRecords(_txn));
+ progress->setTotalWhileRunning(_collection->numRecords(_opCtx));
- WriteUnitOfWork wunit(_txn);
+ WriteUnitOfWork wunit(_opCtx);
Status ret = insert(objToIndex.value(), loc);
if (_buildInBackground)
exec->saveState();
@@ -346,14 +347,14 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(std::set<RecordId>* dupsO
n++;
retries = 0;
} catch (const WriteConflictException& wce) {
- CurOp::get(_txn)->debug().writeConflicts++;
+ CurOp::get(_opCtx)->debug().writeConflicts++;
retries++; // logAndBackoff expects this to be 1 on first call.
wce.logAndBackoff(retries, "index creation", _collection->ns().ns());
// Can't use WRITE_CONFLICT_RETRY_LOOP macros since we need to save/restore exec
// around call to abandonSnapshot.
exec->saveState();
- _txn->recoveryUnit()->abandonSnapshot();
+ _opCtx->recoveryUnit()->abandonSnapshot();
exec->restoreState(); // Handles any WCEs internally.
}
}
@@ -372,13 +373,13 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(std::set<RecordId>* dupsO
}
// Check for interrupt to allow for killop prior to index build completion.
- _txn->checkForInterrupt();
+ _opCtx->checkForInterrupt();
}
if (MONGO_FAIL_POINT(hangAfterStartingIndexBuildUnlocked)) {
// Unlock before hanging so replication recognizes we've completed.
Locker::LockSnapshot lockInfo;
- _txn->lockState()->saveLockStateAndUnlock(&lockInfo);
+ _opCtx->lockState()->saveLockStateAndUnlock(&lockInfo);
while (MONGO_FAIL_POINT(hangAfterStartingIndexBuildUnlocked)) {
log() << "Hanging index build with no locks due to "
"'hangAfterStartingIndexBuildUnlocked' failpoint";
@@ -409,9 +410,9 @@ Status MultiIndexBlock::insert(const BSONObj& doc, const RecordId& loc) {
int64_t unused;
Status idxStatus(ErrorCodes::InternalError, "");
if (_indexes[i].bulk) {
- idxStatus = _indexes[i].bulk->insert(_txn, doc, loc, _indexes[i].options, &unused);
+ idxStatus = _indexes[i].bulk->insert(_opCtx, doc, loc, _indexes[i].options, &unused);
} else {
- idxStatus = _indexes[i].real->insert(_txn, doc, loc, _indexes[i].options, &unused);
+ idxStatus = _indexes[i].real->insert(_opCtx, doc, loc, _indexes[i].options, &unused);
}
if (!idxStatus.isOK())
@@ -426,7 +427,7 @@ Status MultiIndexBlock::doneInserting(std::set<RecordId>* dupsOut) {
continue;
LOG(1) << "\t bulk commit starting for index: "
<< _indexes[i].block->getEntry()->descriptor()->indexName();
- Status status = _indexes[i].real->commitBulk(_txn,
+ Status status = _indexes[i].real->commitBulk(_opCtx,
std::move(_indexes[i].bulk),
_allowInterruption,
_indexes[i].options.dupsAllowed,
@@ -449,7 +450,7 @@ void MultiIndexBlock::commit() {
_indexes[i].block->success();
}
- _txn->recoveryUnit()->registerChange(new SetNeedToCleanupOnRollback(this));
+ _opCtx->recoveryUnit()->registerChange(new SetNeedToCleanupOnRollback(this));
_needToCleanup = false;
}
diff --git a/src/mongo/db/catalog/index_create.h b/src/mongo/db/catalog/index_create.h
index 88dd5db8393..14bec4dafbf 100644
--- a/src/mongo/db/catalog/index_create.h
+++ b/src/mongo/db/catalog/index_create.h
@@ -65,7 +65,7 @@ public:
/**
* Neither pointer is owned.
*/
- MultiIndexBlock(OperationContext* txn, Collection* collection);
+ MultiIndexBlock(OperationContext* opCtx, Collection* collection);
~MultiIndexBlock();
/**
@@ -206,7 +206,7 @@ private:
// Pointers not owned here and must outlive 'this'
Collection* _collection;
- OperationContext* _txn;
+ OperationContext* _opCtx;
bool _buildInBackground;
bool _allowInterruption;
diff --git a/src/mongo/db/catalog/index_key_validate.cpp b/src/mongo/db/catalog/index_key_validate.cpp
index e1616a34c86..0d48247f68d 100644
--- a/src/mongo/db/catalog/index_key_validate.cpp
+++ b/src/mongo/db/catalog/index_key_validate.cpp
@@ -439,14 +439,14 @@ Status validateIndexSpecFieldNames(const BSONObj& indexSpec) {
return Status::OK();
}
-StatusWith<BSONObj> validateIndexSpecCollation(OperationContext* txn,
+StatusWith<BSONObj> validateIndexSpecCollation(OperationContext* opCtx,
const BSONObj& indexSpec,
const CollatorInterface* defaultCollator) {
if (auto collationElem = indexSpec[IndexDescriptor::kCollationFieldName]) {
// validateIndexSpec() should have already verified that 'collationElem' is an object.
invariant(collationElem.type() == BSONType::Object);
- auto collator = CollatorFactoryInterface::get(txn->getServiceContext())
+ auto collator = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(collationElem.Obj());
if (!collator.isOK()) {
return collator.getStatus();
diff --git a/src/mongo/db/catalog/index_key_validate.h b/src/mongo/db/catalog/index_key_validate.h
index bb2cc7ff123..9ee07df7dc5 100644
--- a/src/mongo/db/catalog/index_key_validate.h
+++ b/src/mongo/db/catalog/index_key_validate.h
@@ -74,7 +74,7 @@ Status validateIndexSpecFieldNames(const BSONObj& indexSpec);
* collation spec. If 'collation' is missing, fills it in with the spec for 'defaultCollator'.
* Returns the index specification with 'collation' filled in.
*/
-StatusWith<BSONObj> validateIndexSpecCollation(OperationContext* txn,
+StatusWith<BSONObj> validateIndexSpecCollation(OperationContext* opCtx,
const BSONObj& indexSpec,
const CollatorInterface* defaultCollator);
diff --git a/src/mongo/db/catalog/index_spec_validate_test.cpp b/src/mongo/db/catalog/index_spec_validate_test.cpp
index 6d0bc3eb918..45e65b00d6f 100644
--- a/src/mongo/db/catalog/index_spec_validate_test.cpp
+++ b/src/mongo/db/catalog/index_spec_validate_test.cpp
@@ -790,11 +790,11 @@ TEST(IdIndexSpecValidateTest, ReturnsOKStatusIfAllFieldsAllowedForIdIndex) {
TEST(IndexSpecCollationValidateTest, FillsInFullCollationSpec) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
const CollatorInterface* defaultCollator = nullptr;
- auto result = validateIndexSpecCollation(txn.get(),
+ auto result = validateIndexSpecCollation(opCtx.get(),
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
<< "ns"
@@ -840,11 +840,11 @@ TEST(IndexSpecCollationValidateTest, FillsInFullCollationSpec) {
TEST(IndexSpecCollationValidateTest, RemovesCollationFieldIfSimple) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
const CollatorInterface* defaultCollator = nullptr;
- auto result = validateIndexSpecCollation(txn.get(),
+ auto result = validateIndexSpecCollation(opCtx.get(),
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
<< "ns"
@@ -869,11 +869,11 @@ TEST(IndexSpecCollationValidateTest, RemovesCollationFieldIfSimple) {
TEST(IndexSpecCollationValidateTest, FillsInCollationFieldWithCollectionDefaultIfNotPresent) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
const CollatorInterfaceMock defaultCollator(CollatorInterfaceMock::MockType::kReverseString);
- auto result = validateIndexSpecCollation(txn.get(),
+ auto result = validateIndexSpecCollation(opCtx.get(),
BSON("key" << BSON("field" << 1) << "name"
<< "indexName"
<< "ns"
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index fd8b6819df6..f48454fae29 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -52,29 +52,29 @@
namespace mongo {
namespace {
-static void dropCollection(OperationContext* txn, Database* db, StringData collName) {
- WriteUnitOfWork wunit(txn);
- if (db->dropCollection(txn, collName).isOK()) {
+static void dropCollection(OperationContext* opCtx, Database* db, StringData collName) {
+ WriteUnitOfWork wunit(opCtx);
+ if (db->dropCollection(opCtx, collName).isOK()) {
// ignoring failure case
wunit.commit();
}
}
} // namespace
-Status renameCollection(OperationContext* txn,
+Status renameCollection(OperationContext* opCtx,
const NamespaceString& source,
const NamespaceString& target,
bool dropTarget,
bool stayTemp) {
- DisableDocumentValidation validationDisabler(txn);
+ DisableDocumentValidation validationDisabler(opCtx);
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite globalWriteLock(txn->lockState());
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite globalWriteLock(opCtx->lockState());
// We stay in source context the whole time. This is mostly to set the CurOp namespace.
- OldClientContext ctx(txn, source.ns());
+ OldClientContext ctx(opCtx, source.ns());
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, source);
+ bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, source);
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::NotMaster,
@@ -83,17 +83,17 @@ Status renameCollection(OperationContext* txn,
<< target.ns());
}
- Database* const sourceDB = dbHolder().get(txn, source.db());
+ Database* const sourceDB = dbHolder().get(opCtx, source.db());
Collection* const sourceColl = sourceDB ? sourceDB->getCollection(source.ns()) : nullptr;
if (!sourceColl) {
- if (sourceDB && sourceDB->getViewCatalog()->lookup(txn, source.ns()))
+ if (sourceDB && sourceDB->getViewCatalog()->lookup(opCtx, source.ns()))
return Status(ErrorCodes::CommandNotSupportedOnView,
str::stream() << "cannot rename view: " << source.ns());
return Status(ErrorCodes::NamespaceNotFound, "source namespace does not exist");
}
// Make sure the source collection is not sharded.
- if (CollectionShardingState::get(txn, source)->getMetadata()) {
+ if (CollectionShardingState::get(opCtx, source)->getMetadata()) {
return {ErrorCodes::IllegalOperation, "source namespace cannot be sharded"};
}
@@ -102,7 +102,7 @@ Status renameCollection(OperationContext* txn,
// Ensure that index names do not push the length over the max.
// Iterator includes unfinished indexes.
IndexCatalog::IndexIterator sourceIndIt =
- sourceColl->getIndexCatalog()->getIndexIterator(txn, true);
+ sourceColl->getIndexCatalog()->getIndexIterator(opCtx, true);
int longestIndexNameLength = 0;
while (sourceIndIt.more()) {
int thisLength = sourceIndIt.next()->indexName().length();
@@ -123,16 +123,16 @@ Status renameCollection(OperationContext* txn,
BackgroundOperation::assertNoBgOpInProgForNs(source.ns());
- Database* const targetDB = dbHolder().openDb(txn, target.db());
+ Database* const targetDB = dbHolder().openDb(opCtx, target.db());
{
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
// Check if the target namespace exists and if dropTarget is true.
// Return a non-OK status if target exists and dropTarget is not true or if the collection
// is sharded.
if (targetDB->getCollection(target)) {
- if (CollectionShardingState::get(txn, target)->getMetadata()) {
+ if (CollectionShardingState::get(opCtx, target)->getMetadata()) {
return {ErrorCodes::IllegalOperation, "cannot rename to a sharded collection"};
}
@@ -140,11 +140,11 @@ Status renameCollection(OperationContext* txn,
return Status(ErrorCodes::NamespaceExists, "target namespace exists");
}
- Status s = targetDB->dropCollection(txn, target.ns());
+ Status s = targetDB->dropCollection(opCtx, target.ns());
if (!s.isOK()) {
return s;
}
- } else if (targetDB->getViewCatalog()->lookup(txn, target.ns())) {
+ } else if (targetDB->getViewCatalog()->lookup(opCtx, target.ns())) {
return Status(ErrorCodes::NamespaceExists,
str::stream() << "a view already exists with that name: " << target.ns());
}
@@ -152,13 +152,13 @@ Status renameCollection(OperationContext* txn,
// If we are renaming in the same database, just
// rename the namespace and we're done.
if (sourceDB == targetDB) {
- Status s = targetDB->renameCollection(txn, source.ns(), target.ns(), stayTemp);
+ Status s = targetDB->renameCollection(opCtx, source.ns(), target.ns(), stayTemp);
if (!s.isOK()) {
return s;
}
getGlobalServiceContext()->getOpObserver()->onRenameCollection(
- txn, NamespaceString(source), NamespaceString(target), dropTarget, stayTemp);
+ opCtx, NamespaceString(source), NamespaceString(target), dropTarget, stayTemp);
wunit.commit();
return Status::OK();
@@ -174,18 +174,18 @@ Status renameCollection(OperationContext* txn,
// TODO use a temp collection and unset the temp flag on success.
Collection* targetColl = nullptr;
{
- CollectionOptions options = sourceColl->getCatalogEntry()->getCollectionOptions(txn);
+ CollectionOptions options = sourceColl->getCatalogEntry()->getCollectionOptions(opCtx);
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
// No logOp necessary because the entire renameCollection command is one logOp.
- bool shouldReplicateWrites = txn->writesAreReplicated();
- txn->setReplicatedWrites(false);
- targetColl = targetDB->createCollection(txn,
+ bool shouldReplicateWrites = opCtx->writesAreReplicated();
+ opCtx->setReplicatedWrites(false);
+ targetColl = targetDB->createCollection(opCtx,
target.ns(),
options,
false); // _id index build with others later.
- txn->setReplicatedWrites(shouldReplicateWrites);
+ opCtx->setReplicatedWrites(shouldReplicateWrites);
if (!targetColl) {
return Status(ErrorCodes::OutOfDiskSpace, "Failed to create target collection.");
}
@@ -194,9 +194,9 @@ Status renameCollection(OperationContext* txn,
}
// Dismissed on success
- ScopeGuard targetCollectionDropper = MakeGuard(dropCollection, txn, targetDB, target.ns());
+ ScopeGuard targetCollectionDropper = MakeGuard(dropCollection, opCtx, targetDB, target.ns());
- MultiIndexBlock indexer(txn, targetColl);
+ MultiIndexBlock indexer(opCtx, targetColl);
indexer.allowInterruption();
std::vector<MultiIndexBlock*> indexers{&indexer};
@@ -204,7 +204,7 @@ Status renameCollection(OperationContext* txn,
{
std::vector<BSONObj> indexesToCopy;
IndexCatalog::IndexIterator sourceIndIt =
- sourceColl->getIndexCatalog()->getIndexIterator(txn, true);
+ sourceColl->getIndexCatalog()->getIndexIterator(opCtx, true);
while (sourceIndIt.more()) {
const BSONObj currIndex = sourceIndIt.next()->infoObj();
@@ -224,18 +224,18 @@ Status renameCollection(OperationContext* txn,
{
// Copy over all the data from source collection to target collection.
- auto cursor = sourceColl->getCursor(txn);
+ auto cursor = sourceColl->getCursor(opCtx);
while (auto record = cursor->next()) {
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
const auto obj = record->data.releaseToBson();
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
// No logOp necessary because the entire renameCollection command is one logOp.
- bool shouldReplicateWrites = txn->writesAreReplicated();
- txn->setReplicatedWrites(false);
- Status status = targetColl->insertDocument(txn, obj, indexers, true);
- txn->setReplicatedWrites(shouldReplicateWrites);
+ bool shouldReplicateWrites = opCtx->writesAreReplicated();
+ opCtx->setReplicatedWrites(false);
+ Status status = targetColl->insertDocument(opCtx, obj, indexers, true);
+ opCtx->setReplicatedWrites(shouldReplicateWrites);
if (!status.isOK())
return status;
wunit.commit();
@@ -249,19 +249,19 @@ Status renameCollection(OperationContext* txn,
{
// Getting here means we successfully built the target copy. We now remove the
// source collection and finalize the rename.
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
- bool shouldReplicateWrites = txn->writesAreReplicated();
- txn->setReplicatedWrites(false);
- Status status = sourceDB->dropCollection(txn, source.ns());
- txn->setReplicatedWrites(shouldReplicateWrites);
+ bool shouldReplicateWrites = opCtx->writesAreReplicated();
+ opCtx->setReplicatedWrites(false);
+ Status status = sourceDB->dropCollection(opCtx, source.ns());
+ opCtx->setReplicatedWrites(shouldReplicateWrites);
if (!status.isOK())
return status;
indexer.commit();
getGlobalServiceContext()->getOpObserver()->onRenameCollection(
- txn, NamespaceString(source), NamespaceString(target), dropTarget, stayTemp);
+ opCtx, NamespaceString(source), NamespaceString(target), dropTarget, stayTemp);
wunit.commit();
}
diff --git a/src/mongo/db/catalog/rename_collection.h b/src/mongo/db/catalog/rename_collection.h
index fb1aa7b5387..c6a3f56b380 100644
--- a/src/mongo/db/catalog/rename_collection.h
+++ b/src/mongo/db/catalog/rename_collection.h
@@ -37,7 +37,7 @@ class OperationContext;
* iff "dropTarget" is true. "stayTemp" indicates whether a collection should maintain its
* temporariness.
*/
-Status renameCollection(OperationContext* txn,
+Status renameCollection(OperationContext* opCtx,
const NamespaceString& source,
const NamespaceString& target,
bool dropTarget,
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
index 643a25fda05..339fcdc9d9d 100644
--- a/src/mongo/db/client.cpp
+++ b/src/mongo/db/client.cpp
@@ -128,15 +128,15 @@ ServiceContext::UniqueOperationContext Client::makeOperationContext() {
return getServiceContext()->makeOperationContext(this);
}
-void Client::setOperationContext(OperationContext* txn) {
+void Client::setOperationContext(OperationContext* opCtx) {
// We can only set the OperationContext once before resetting it.
- invariant(txn != NULL && _txn == NULL);
- _txn = txn;
+ invariant(opCtx != NULL && _opCtx == NULL);
+ _opCtx = opCtx;
}
void Client::resetOperationContext() {
- invariant(_txn != NULL);
- _txn = NULL;
+ invariant(_opCtx != NULL);
+ _opCtx = NULL;
}
std::string Client::clientAddress(bool includePort) const {
diff --git a/src/mongo/db/client.h b/src/mongo/db/client.h
index 50d4f5de541..32d8de0fc04 100644
--- a/src/mongo/db/client.h
+++ b/src/mongo/db/client.h
@@ -155,12 +155,12 @@ public:
ServiceContext::UniqueOperationContext makeOperationContext();
/**
- * Sets the active operation context on this client to "txn", which must be non-NULL.
+ * Sets the active operation context on this client to "opCtx", which must be non-NULL.
*
* It is an error to call this method if there is already an operation context on Client.
* It is an error to call this on an unlocked client.
*/
- void setOperationContext(OperationContext* txn);
+ void setOperationContext(OperationContext* opCtx);
/**
* Clears the active operation context on this client.
@@ -177,7 +177,7 @@ public:
* by this method while the client is not locked.
*/
OperationContext* getOperationContext() {
- return _txn;
+ return _opCtx;
}
// TODO(spencer): SERVER-10228 SERVER-14779 Remove this/move it fully into OperationContext.
@@ -224,7 +224,7 @@ private:
bool _inDirectClient = false;
// If != NULL, then contains the currently active OperationContext
- OperationContext* _txn = nullptr;
+ OperationContext* _opCtx = nullptr;
PseudoRandom _prng;
};
diff --git a/src/mongo/db/clientcursor.cpp b/src/mongo/db/clientcursor.cpp
index ce1db6d2954..708a2bc38f7 100644
--- a/src/mongo/db/clientcursor.cpp
+++ b/src/mongo/db/clientcursor.cpp
@@ -148,13 +148,13 @@ void ClientCursor::resetIdleTime() {
_idleAgeMillis = 0;
}
-void ClientCursor::updateSlaveLocation(OperationContext* txn) {
+void ClientCursor::updateSlaveLocation(OperationContext* opCtx) {
if (_slaveReadTill.isNull())
return;
verify(str::startsWith(_ns.c_str(), "local.oplog."));
- Client* c = txn->getClient();
+ Client* c = opCtx->getClient();
verify(c);
OID rid = repl::ReplClientInfo::forClient(c).getRemoteID();
if (!rid.isSet())
@@ -275,10 +275,10 @@ public:
Timer t;
while (!globalInShutdownDeprecated()) {
{
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
cursorStatsTimedOut.increment(
- CursorManager::timeoutCursorsGlobal(&txn, t.millisReset()));
+ CursorManager::timeoutCursorsGlobal(&opCtx, t.millisReset()));
}
sleepsecs(clientCursorMonitorFrequencySecs.load());
}
diff --git a/src/mongo/db/clientcursor.h b/src/mongo/db/clientcursor.h
index 267565ef7dd..44ae4600ac6 100644
--- a/src/mongo/db/clientcursor.h
+++ b/src/mongo/db/clientcursor.h
@@ -188,7 +188,7 @@ public:
// Used to report replication position only in master-slave, so we keep them as TimeStamp rather
// than OpTime.
- void updateSlaveLocation(OperationContext* txn);
+ void updateSlaveLocation(OperationContext* opCtx);
void slaveReadTill(const Timestamp& t) {
_slaveReadTill = t;
diff --git a/src/mongo/db/clientlistplugin.cpp b/src/mongo/db/clientlistplugin.cpp
index 0cd881d869e..73fb06865d3 100644
--- a/src/mongo/db/clientlistplugin.cpp
+++ b/src/mongo/db/clientlistplugin.cpp
@@ -57,7 +57,7 @@ public:
ClientListPlugin() : WebStatusPlugin("clients", 20) {}
virtual void init() {}
- virtual void run(OperationContext* txn, std::stringstream& ss) {
+ virtual void run(OperationContext* opCtx, std::stringstream& ss) {
using namespace html;
ss << "\n<table border=1 cellpadding=2 cellspacing=0>";
@@ -78,7 +78,7 @@ public:
<< "</tr>\n";
- _processAllClients(txn->getClient()->getServiceContext(), ss);
+ _processAllClients(opCtx->getClient()->getServiceContext(), ss);
ss << "</table>\n";
}
@@ -92,23 +92,23 @@ private:
// Make the client stable
stdx::lock_guard<Client> lk(*client);
- const OperationContext* txn = client->getOperationContext();
- if (!txn)
+ const OperationContext* opCtx = client->getOperationContext();
+ if (!opCtx)
continue;
- CurOp* curOp = CurOp::get(txn);
+ CurOp* curOp = CurOp::get(opCtx);
if (!curOp)
continue;
ss << "<tr><td>" << client->desc() << "</td>";
- tablecell(ss, txn->getOpID());
+ tablecell(ss, opCtx->getOpID());
tablecell(ss, true);
// LockState
{
Locker::LockerInfo lockerInfo;
- txn->lockState()->getLockerInfo(&lockerInfo);
+ opCtx->lockState()->getLockerInfo(&lockerInfo);
BSONObjBuilder lockerInfoBuilder;
fillLockerInfo(lockerInfo, lockerInfoBuilder);
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 6aa2f1cdb33..439d3ac574c 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -137,24 +137,25 @@ BSONObj Cloner::getIdIndexSpec(const std::list<BSONObj>& indexSpecs) {
Cloner::Cloner() {}
struct Cloner::Fun {
- Fun(OperationContext* txn, const string& dbName) : lastLog(0), txn(txn), _dbName(dbName) {}
+ Fun(OperationContext* opCtx, const string& dbName)
+ : lastLog(0), opCtx(opCtx), _dbName(dbName) {}
void operator()(DBClientCursorBatchIterator& i) {
invariant(from_collection.coll() != "system.indexes");
// XXX: can probably take dblock instead
- unique_ptr<ScopedTransaction> scopedXact(new ScopedTransaction(txn, MODE_X));
- unique_ptr<Lock::GlobalWrite> globalWriteLock(new Lock::GlobalWrite(txn->lockState()));
+ unique_ptr<ScopedTransaction> scopedXact(new ScopedTransaction(opCtx, MODE_X));
+ unique_ptr<Lock::GlobalWrite> globalWriteLock(new Lock::GlobalWrite(opCtx->lockState()));
uassert(
ErrorCodes::NotMaster,
str::stream() << "Not primary while cloning collection " << from_collection.ns()
<< " to "
<< to_collection.ns(),
- !txn->writesAreReplicated() ||
- repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, to_collection));
+ !opCtx->writesAreReplicated() ||
+ repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, to_collection));
// Make sure database still exists after we resume from the temp release
- Database* db = dbHolder().openDb(txn, _dbName);
+ Database* db = dbHolder().openDb(opCtx, _dbName);
bool createdCollection = false;
Collection* collection = NULL;
@@ -166,10 +167,10 @@ struct Cloner::Fun {
<< "]",
!createdCollection);
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
- WriteUnitOfWork wunit(txn);
- Status s = userCreateNS(txn,
+ WriteUnitOfWork wunit(opCtx);
+ Status s = userCreateNS(opCtx,
db,
to_collection.toString(),
from_options,
@@ -179,7 +180,7 @@ struct Cloner::Fun {
wunit.commit();
collection = db->getCollection(to_collection);
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollection", to_collection.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "createCollection", to_collection.ns());
}
const bool isSystemViewsClone = to_collection.isSystemDotViews();
@@ -193,27 +194,27 @@ struct Cloner::Fun {
log() << "clone " << to_collection << ' ' << numSeen;
lastLog = now;
}
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
scopedXact.reset();
globalWriteLock.reset();
- CurOp::get(txn)->yielded();
+ CurOp::get(opCtx)->yielded();
- scopedXact.reset(new ScopedTransaction(txn, MODE_X));
- globalWriteLock.reset(new Lock::GlobalWrite(txn->lockState()));
+ scopedXact.reset(new ScopedTransaction(opCtx, MODE_X));
+ globalWriteLock.reset(new Lock::GlobalWrite(opCtx->lockState()));
// Check if everything is still all right.
- if (txn->writesAreReplicated()) {
+ if (opCtx->writesAreReplicated()) {
uassert(28592,
str::stream() << "Cannot write to ns: " << to_collection.ns()
<< " after yielding",
repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(
- txn, to_collection));
+ opCtx, to_collection));
}
// TODO: SERVER-16598 abort if original db or collection is gone.
- db = dbHolder().get(txn, _dbName);
+ db = dbHolder().get(opCtx, _dbName);
uassert(28593,
str::stream() << "Database " << _dbName << " dropped while cloning",
db != NULL);
@@ -262,13 +263,13 @@ struct Cloner::Fun {
verify(collection);
++numSeen;
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
BSONObj doc = tmp;
OpDebug* const nullOpDebug = nullptr;
- Status status = collection->insertDocument(txn, doc, nullOpDebug, true);
+ Status status = collection->insertDocument(opCtx, doc, nullOpDebug, true);
if (!status.isOK() && status.code() != ErrorCodes::DuplicateKey) {
error() << "error: exception cloning object in " << from_collection << ' '
<< redact(status) << " obj:" << redact(doc);
@@ -278,7 +279,7 @@ struct Cloner::Fun {
wunit.commit();
}
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "cloner insert", to_collection.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "cloner insert", to_collection.ns());
RARELY if (time(0) - saveLast > 60) {
log() << numSeen << " objects cloned so far from collection " << from_collection;
saveLast = time(0);
@@ -299,7 +300,7 @@ struct Cloner::Fun {
}
time_t lastLog;
- OperationContext* txn;
+ OperationContext* opCtx;
const string _dbName;
int64_t numSeen;
@@ -313,7 +314,7 @@ struct Cloner::Fun {
/* copy the specified collection
*/
-void Cloner::copy(OperationContext* txn,
+void Cloner::copy(OperationContext* opCtx,
const string& toDBName,
const NamespaceString& from_collection,
const BSONObj& from_opts,
@@ -324,7 +325,7 @@ void Cloner::copy(OperationContext* txn,
LOG(2) << "\t\tcloning collection " << from_collection << " to " << to_collection << " on "
<< _conn->getServerAddress() << " with filter " << redact(query.toString());
- Fun f(txn, toDBName);
+ Fun f(opCtx, toDBName);
f.numSeen = 0;
f.from_collection = from_collection;
f.from_options = from_opts;
@@ -335,7 +336,7 @@ void Cloner::copy(OperationContext* txn,
int options = QueryOption_NoCursorTimeout | (opts.slaveOk ? QueryOption_SlaveOk : 0);
{
- Lock::TempRelease tempRelease(txn->lockState());
+ Lock::TempRelease tempRelease(opCtx->lockState());
_conn->query(stdx::function<void(DBClientCursorBatchIterator&)>(f),
from_collection.ns(),
query,
@@ -349,11 +350,11 @@ void Cloner::copy(OperationContext* txn,
<< to_collection.ns()
<< " with filter "
<< query.toString(),
- !txn->writesAreReplicated() ||
- repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, to_collection));
+ !opCtx->writesAreReplicated() ||
+ repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, to_collection));
}
-void Cloner::copyIndexes(OperationContext* txn,
+void Cloner::copyIndexes(OperationContext* opCtx,
const string& toDBName,
const NamespaceString& from_collection,
const BSONObj& from_opts,
@@ -372,8 +373,8 @@ void Cloner::copyIndexes(OperationContext* txn,
<< " to "
<< to_collection.ns()
<< " (Cloner)",
- !txn->writesAreReplicated() ||
- repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, to_collection));
+ !opCtx->writesAreReplicated() ||
+ repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, to_collection));
if (indexesToBuild.empty())
@@ -381,16 +382,16 @@ void Cloner::copyIndexes(OperationContext* txn,
// We are under lock here again, so reload the database in case it may have disappeared
// during the temp release
- Database* db = dbHolder().openDb(txn, toDBName);
+ Database* db = dbHolder().openDb(opCtx, toDBName);
Collection* collection = db->getCollection(to_collection);
if (!collection) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
Status s = userCreateNS(
- txn,
+ opCtx,
db,
to_collection.toString(),
from_opts,
@@ -401,7 +402,7 @@ void Cloner::copyIndexes(OperationContext* txn,
invariant(collection);
wunit.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollection", to_collection.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "createCollection", to_collection.ns());
}
// TODO pass the MultiIndexBlock when inserting into the collection rather than building the
@@ -409,7 +410,7 @@ void Cloner::copyIndexes(OperationContext* txn,
// from creation to completion without yielding to ensure the index and the collection
// matches. It also wouldn't work on non-empty collections so we would need both
// implementations anyway as long as that is supported.
- MultiIndexBlock indexer(txn, collection);
+ MultiIndexBlock indexer(opCtx, collection);
indexer.allowInterruption();
indexer.removeExistingIndexes(&indexesToBuild);
@@ -419,20 +420,20 @@ void Cloner::copyIndexes(OperationContext* txn,
auto indexInfoObjs = uassertStatusOK(indexer.init(indexesToBuild));
uassertStatusOK(indexer.insertAllDocumentsInCollection());
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
indexer.commit();
- if (txn->writesAreReplicated()) {
+ if (opCtx->writesAreReplicated()) {
const string targetSystemIndexesCollectionName = to_collection.getSystemIndexesCollection();
const char* createIndexNs = targetSystemIndexesCollectionName.c_str();
for (auto&& infoObj : indexInfoObjs) {
getGlobalServiceContext()->getOpObserver()->onCreateIndex(
- txn, createIndexNs, infoObj, false);
+ opCtx, createIndexNs, infoObj, false);
}
}
wunit.commit();
}
-bool Cloner::copyCollection(OperationContext* txn,
+bool Cloner::copyCollection(OperationContext* opCtx,
const string& ns,
const BSONObj& query,
string& errmsg,
@@ -474,22 +475,22 @@ bool Cloner::copyCollection(OperationContext* txn,
auto sourceIndexes = _conn->getIndexSpecs(nss.ns(), QueryOption_SlaveOk);
auto idIndexSpec = getIdIndexSpec(sourceIndexes);
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbWrite(txn->lockState(), dbname, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dbWrite(opCtx->lockState(), dbname, MODE_X);
uassert(ErrorCodes::PrimarySteppedDown,
str::stream() << "Not primary while copying collection " << ns << " (Cloner)",
- !txn->writesAreReplicated() ||
- repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, nss));
+ !opCtx->writesAreReplicated() ||
+ repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, nss));
- Database* db = dbHolder().openDb(txn, dbname);
+ Database* db = dbHolder().openDb(opCtx, dbname);
if (shouldCreateCollection) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
- WriteUnitOfWork wunit(txn);
- Status status = userCreateNS(txn, db, ns, options, true, idIndexSpec);
+ WriteUnitOfWork wunit(opCtx);
+ Status status = userCreateNS(opCtx, db, ns, options, true, idIndexSpec);
if (!status.isOK()) {
errmsg = status.toString();
// abort write unit of work
@@ -497,7 +498,7 @@ bool Cloner::copyCollection(OperationContext* txn,
}
wunit.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollection", ns);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "createCollection", ns);
} else {
LOG(1) << "No collection info found for ns:" << nss.toString()
<< ", host:" << _conn->getServerAddress();
@@ -506,7 +507,7 @@ bool Cloner::copyCollection(OperationContext* txn,
// main data
CloneOptions opts;
opts.slaveOk = true;
- copy(txn, dbname, nss, options, idIndexSpec, nss, opts, Query(query).snapshot());
+ copy(opCtx, dbname, nss, options, idIndexSpec, nss, opts, Query(query).snapshot());
/* TODO : copyIndexes bool does not seem to be implemented! */
if (!shouldCopyIndexes) {
@@ -514,7 +515,7 @@ bool Cloner::copyCollection(OperationContext* txn,
}
// indexes
- copyIndexes(txn, dbname, NamespaceString(ns), options, sourceIndexes, NamespaceString(ns));
+ copyIndexes(opCtx, dbname, NamespaceString(ns), options, sourceIndexes, NamespaceString(ns));
return true;
}
@@ -564,21 +565,21 @@ StatusWith<std::vector<BSONObj>> Cloner::filterCollectionsForClone(
}
Status Cloner::createCollectionsForDb(
- OperationContext* txn,
+ OperationContext* opCtx,
const std::vector<CreateCollectionParams>& createCollectionParams,
const std::string& dbName) {
- Database* db = dbHolder().openDb(txn, dbName);
+ Database* db = dbHolder().openDb(opCtx, dbName);
for (auto&& params : createCollectionParams) {
auto options = params.collectionInfo["options"].Obj();
const NamespaceString nss(dbName, params.collectionName);
uassertStatusOK(userAllowedCreateNS(dbName, params.collectionName));
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- txn->checkForInterrupt();
- WriteUnitOfWork wunit(txn);
+ opCtx->checkForInterrupt();
+ WriteUnitOfWork wunit(opCtx);
Status createStatus =
- userCreateNS(txn,
+ userCreateNS(opCtx,
db,
nss.ns(),
options,
@@ -590,12 +591,12 @@ Status Cloner::createCollectionsForDb(
wunit.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollection", nss.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "createCollection", nss.ns());
}
return Status::OK();
}
-Status Cloner::copyDb(OperationContext* txn,
+Status Cloner::copyDb(OperationContext* opCtx,
const std::string& toDBName,
const string& masterHost,
const CloneOptions& opts,
@@ -603,7 +604,7 @@ Status Cloner::copyDb(OperationContext* txn,
std::vector<BSONObj> collectionsToClone) {
massert(10289,
"useReplAuth is not written to replication log",
- !opts.useReplAuth || !txn->writesAreReplicated());
+ !opts.useReplAuth || !opCtx->writesAreReplicated());
auto statusWithMasterHost = ConnectionString::parse(masterHost);
if (!statusWithMasterHost.isOK()) {
@@ -616,7 +617,7 @@ Status Cloner::copyDb(OperationContext* txn,
std::vector<HostAndPort> csServers = cs.getServers();
for (std::vector<HostAndPort>::const_iterator iter = csServers.begin(); iter != csServers.end();
++iter) {
- if (!repl::isSelf(*iter, txn->getServiceContext()))
+ if (!repl::isSelf(*iter, opCtx->getServiceContext()))
continue;
masterSameProcess = true;
@@ -648,7 +649,7 @@ Status Cloner::copyDb(OperationContext* txn,
_conn = std::move(con);
} else {
- _conn.reset(new DBDirectClient(txn));
+ _conn.reset(new DBDirectClient(opCtx));
}
}
@@ -661,7 +662,7 @@ Status Cloner::copyDb(OperationContext* txn,
if (opts.createCollections) {
// getCollectionInfos may make a remote call, which may block indefinitely, so release
// the global lock that we are entering with.
- Lock::TempRelease tempRelease(txn->lockState());
+ Lock::TempRelease tempRelease(opCtx->lockState());
std::list<BSONObj> initialCollections = _conn->getCollectionInfos(
opts.fromDB, ListCollectionsFilter::makeTypeCollectionFilter());
auto status = filterCollectionsForClone(opts, initialCollections);
@@ -687,7 +688,7 @@ Status Cloner::copyDb(OperationContext* txn,
// Get index specs for each collection.
std::map<StringData, std::list<BSONObj>> collectionIndexSpecs;
{
- Lock::TempRelease tempRelease(txn->lockState());
+ Lock::TempRelease tempRelease(opCtx->lockState());
for (auto&& params : createCollectionParams) {
const NamespaceString nss(opts.fromDB, params.collectionName);
auto indexSpecs =
@@ -701,15 +702,16 @@ Status Cloner::copyDb(OperationContext* txn,
}
}
- uassert(ErrorCodes::NotMaster,
- str::stream() << "Not primary while cloning database " << opts.fromDB
- << " (after getting list of collections to clone)",
- !txn->writesAreReplicated() ||
- repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(txn, toDBName));
+ uassert(
+ ErrorCodes::NotMaster,
+ str::stream() << "Not primary while cloning database " << opts.fromDB
+ << " (after getting list of collections to clone)",
+ !opCtx->writesAreReplicated() ||
+ repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(opCtx, toDBName));
if (opts.syncData) {
if (opts.createCollections) {
- Status status = createCollectionsForDb(txn, createCollectionParams, toDBName);
+ Status status = createCollectionsForDb(opCtx, createCollectionParams, toDBName);
if (!status.isOK()) {
return status;
}
@@ -729,7 +731,7 @@ Status Cloner::copyDb(OperationContext* txn,
if (opts.snapshot)
q.snapshot();
- copy(txn,
+ copy(opCtx,
toDBName,
from_name,
params.collectionInfo["options"].Obj(),
@@ -749,7 +751,7 @@ Status Cloner::copyDb(OperationContext* txn,
const NamespaceString to_name(toDBName, params.collectionName);
- copyIndexes(txn,
+ copyIndexes(opCtx,
toDBName,
from_name,
params.collectionInfo["options"].Obj(),
diff --git a/src/mongo/db/cloner.h b/src/mongo/db/cloner.h
index a7f8bdcd50d..6c9abb64b12 100644
--- a/src/mongo/db/cloner.h
+++ b/src/mongo/db/cloner.h
@@ -62,14 +62,14 @@ public:
* that are cloned. When opts.createCollections is true, this parameter is
* ignored and the collection list is fetched from the remote via _conn.
*/
- Status copyDb(OperationContext* txn,
+ Status copyDb(OperationContext* opCtx,
const std::string& toDBName,
const std::string& masterHost,
const CloneOptions& opts,
std::set<std::string>* clonedColls,
std::vector<BSONObj> collectionsToClone = std::vector<BSONObj>());
- bool copyCollection(OperationContext* txn,
+ bool copyCollection(OperationContext* opCtx,
const std::string& ns,
const BSONObj& query,
std::string& errmsg,
@@ -89,7 +89,7 @@ public:
// Executes 'createCollection' for each collection described in 'createCollectionParams', in
// 'dbName'.
- Status createCollectionsForDb(OperationContext* txn,
+ Status createCollectionsForDb(OperationContext* opCtx,
const std::vector<CreateCollectionParams>& createCollectionParams,
const std::string& dbName);
@@ -99,7 +99,7 @@ public:
static BSONObj getIdIndexSpec(const std::list<BSONObj>& indexSpecs);
private:
- void copy(OperationContext* txn,
+ void copy(OperationContext* opCtx,
const std::string& toDBName,
const NamespaceString& from_ns,
const BSONObj& from_opts,
@@ -108,7 +108,7 @@ private:
const CloneOptions& opts,
Query q);
- void copyIndexes(OperationContext* txn,
+ void copyIndexes(OperationContext* opCtx,
const std::string& toDBName,
const NamespaceString& from_ns,
const BSONObj& from_opts,
diff --git a/src/mongo/db/commands.cpp b/src/mongo/db/commands.cpp
index de0f2c72d95..e726bb3918a 100644
--- a/src/mongo/db/commands.cpp
+++ b/src/mongo/db/commands.cpp
@@ -143,7 +143,7 @@ void Command::help(stringstream& help) const {
help << "no help defined";
}
-Status Command::explain(OperationContext* txn,
+Status Command::explain(OperationContext* opCtx,
const string& dbname,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -200,10 +200,10 @@ void Command::appendOperationTime(BSONObjBuilder& result, LogicalTime operationT
result.append("operationTime", operationTime.asTimestamp());
}
-Status Command::checkAuthForOperation(OperationContext* txn,
+Status Command::checkAuthForOperation(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj) {
- return checkAuthForCommand(txn->getClient(), dbname, cmdObj);
+ return checkAuthForCommand(opCtx->getClient(), dbname, cmdObj);
}
Status Command::checkAuthForCommand(Client* client,
@@ -228,18 +228,18 @@ BSONObj Command::getRedactedCopyForLogging(const BSONObj& cmdObj) {
}
static Status _checkAuthorizationImpl(Command* c,
- OperationContext* txn,
+ OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj) {
namespace mmb = mutablebson;
- auto client = txn->getClient();
+ auto client = opCtx->getClient();
if (c->adminOnly() && dbname != "admin") {
return Status(ErrorCodes::Unauthorized,
str::stream() << c->getName()
<< " may only be run against the admin database.");
}
if (AuthorizationSession::get(client)->getAuthorizationManager().isAuthEnabled()) {
- Status status = c->checkAuthForOperation(txn, dbname, cmdObj);
+ Status status = c->checkAuthForOperation(opCtx, dbname, cmdObj);
if (status == ErrorCodes::Unauthorized) {
mmb::Document cmdToLog(cmdObj, mmb::Document::kInPlaceDisabled);
c->redactForLogging(&cmdToLog);
@@ -260,15 +260,15 @@ static Status _checkAuthorizationImpl(Command* c,
}
Status Command::checkAuthorization(Command* c,
- OperationContext* txn,
+ OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj) {
namespace mmb = mutablebson;
- Status status = _checkAuthorizationImpl(c, txn, dbname, cmdObj);
+ Status status = _checkAuthorizationImpl(c, opCtx, dbname, cmdObj);
if (!status.isOK()) {
log(LogComponent::kAccessControl) << status;
}
- audit::logCommandAuthzCheck(txn->getClient(), dbname, cmdObj, c, status.code());
+ audit::logCommandAuthzCheck(opCtx->getClient(), dbname, cmdObj, c, status.code());
return status;
}
@@ -278,7 +278,7 @@ bool Command::isHelpRequest(const BSONElement& helpElem) {
const char Command::kHelpFieldName[] = "help";
-void Command::generateHelpResponse(OperationContext* txn,
+void Command::generateHelpResponse(OperationContext* opCtx,
const rpc::RequestInterface& request,
rpc::ReplyBuilderInterface* replyBuilder,
const Command& command) {
@@ -294,11 +294,11 @@ void Command::generateHelpResponse(OperationContext* txn,
namespace {
-void _generateErrorResponse(OperationContext* txn,
+void _generateErrorResponse(OperationContext* opCtx,
rpc::ReplyBuilderInterface* replyBuilder,
const DBException& exception,
const BSONObj& metadata) {
- Command::registerError(txn, exception);
+ Command::registerError(opCtx, exception);
// We could have thrown an exception after setting fields in the builder,
// so we need to reset it to a clean state just to be sure.
@@ -320,12 +320,12 @@ void _generateErrorResponse(OperationContext* txn,
replyBuilder->setMetadata(metadata);
}
-void _generateErrorResponse(OperationContext* txn,
+void _generateErrorResponse(OperationContext* opCtx,
rpc::ReplyBuilderInterface* replyBuilder,
const DBException& exception,
const BSONObj& metadata,
LogicalTime operationTime) {
- Command::registerError(txn, exception);
+ Command::registerError(opCtx, exception);
// We could have thrown an exception after setting fields in the builder,
// so we need to reset it to a clean state just to be sure.
@@ -352,7 +352,7 @@ void _generateErrorResponse(OperationContext* txn,
} // namespace
-void Command::generateErrorResponse(OperationContext* txn,
+void Command::generateErrorResponse(OperationContext* opCtx,
rpc::ReplyBuilderInterface* replyBuilder,
const DBException& exception,
const rpc::RequestInterface& request,
@@ -365,10 +365,10 @@ void Command::generateErrorResponse(OperationContext* txn,
<< "' metadata '" << request.getMetadata() << "' and operationTime '"
<< operationTime.toString() << "': " << exception.toString();
- _generateErrorResponse(txn, replyBuilder, exception, metadata, operationTime);
+ _generateErrorResponse(opCtx, replyBuilder, exception, metadata, operationTime);
}
-void Command::generateErrorResponse(OperationContext* txn,
+void Command::generateErrorResponse(OperationContext* opCtx,
rpc::ReplyBuilderInterface* replyBuilder,
const DBException& exception,
const rpc::RequestInterface& request,
@@ -380,24 +380,24 @@ void Command::generateErrorResponse(OperationContext* txn,
<< "' "
<< "and metadata '" << request.getMetadata() << "': " << exception.toString();
- _generateErrorResponse(txn, replyBuilder, exception, metadata);
+ _generateErrorResponse(opCtx, replyBuilder, exception, metadata);
}
-void Command::generateErrorResponse(OperationContext* txn,
+void Command::generateErrorResponse(OperationContext* opCtx,
rpc::ReplyBuilderInterface* replyBuilder,
const DBException& exception,
const rpc::RequestInterface& request) {
LOG(1) << "assertion while executing command '" << request.getCommandName() << "' "
<< "on database '" << request.getDatabase() << "': " << exception.toString();
- _generateErrorResponse(txn, replyBuilder, exception, rpc::makeEmptyMetadata());
+ _generateErrorResponse(opCtx, replyBuilder, exception, rpc::makeEmptyMetadata());
}
-void Command::generateErrorResponse(OperationContext* txn,
+void Command::generateErrorResponse(OperationContext* opCtx,
rpc::ReplyBuilderInterface* replyBuilder,
const DBException& exception) {
LOG(1) << "assertion while executing command: " << exception.toString();
- _generateErrorResponse(txn, replyBuilder, exception, rpc::makeEmptyMetadata());
+ _generateErrorResponse(opCtx, replyBuilder, exception, rpc::makeEmptyMetadata());
}
namespace {
@@ -433,8 +433,8 @@ void Command::registerRegisterError(
registeredRegisterErrorHandler = std::move(handler);
}
-void Command::registerError(OperationContext* const txn, const DBException& exception) {
- registeredRegisterErrorHandler(txn, exception);
+void Command::registerError(OperationContext* const opCtx, const DBException& exception) {
+ registeredRegisterErrorHandler(opCtx, exception);
}
namespace {
@@ -445,11 +445,11 @@ stdx::function<Command::ExecCommandHandler> execCommandHandler =
rpc::ReplyBuilderInterface* const) { invariant(false); };
} // namespace
-void Command::execCommand(OperationContext* const txn,
+void Command::execCommand(OperationContext* const opCtx,
Command* const command,
const rpc::RequestInterface& request,
rpc::ReplyBuilderInterface* const replyBuilder) {
- execCommandHandler(txn, command, request, replyBuilder);
+ execCommandHandler(opCtx, command, request, replyBuilder);
}
void Command::registerExecCommand(stdx::function<Command::ExecCommandHandler> handler) {
diff --git a/src/mongo/db/commands.h b/src/mongo/db/commands.h
index b853c7a2a94..832a8e7c4b8 100644
--- a/src/mongo/db/commands.h
+++ b/src/mongo/db/commands.h
@@ -134,7 +134,7 @@ public:
return value is true if succeeded. if false, set errmsg text.
*/
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& db,
BSONObj& cmdObj,
int options,
@@ -147,7 +147,7 @@ public:
* Then we won't need to mutate the command object. At that point we can also make
* this method virtual so commands can override it directly.
*/
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const rpc::RequestInterface& request,
rpc::ReplyBuilderInterface* replyBuilder);
@@ -210,11 +210,11 @@ public:
* which knows how to convert an execution stage tree into explain output.
*
* TODO: Remove the 'serverSelectionMetadata' parameter in favor of reading the
- * ServerSelectionMetadata off 'txn'. Once OP_COMMAND is implemented in mongos, this metadata
+ * ServerSelectionMetadata off 'opCtx'. Once OP_COMMAND is implemented in mongos, this metadata
* will be parsed and attached as a decoration on the OperationContext, as is already done on
* the mongod side.
*/
- virtual Status explain(OperationContext* txn,
+ virtual Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -222,10 +222,11 @@ public:
BSONObjBuilder* out) const;
/**
- * Checks if the client associated with the given OperationContext, "txn", is authorized to run
+ * Checks if the client associated with the given OperationContext, "opCtx", is authorized to
+ * run
* this command on database "dbname" with the invocation described by "cmdObj".
*/
- virtual Status checkAuthForOperation(OperationContext* txn,
+ virtual Status checkAuthForOperation(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj);
@@ -313,7 +314,7 @@ public:
*
* This is currently used by mongod and dbwebserver.
*/
- static void execCommand(OperationContext* txn,
+ static void execCommand(OperationContext* opCtx,
Command* command,
const rpc::RequestInterface& request,
rpc::ReplyBuilderInterface* replyBuilder);
@@ -388,7 +389,7 @@ public:
* Generates a reply from the 'help' information associated with a command. The state of
* the passed ReplyBuilder will be in kOutputDocs after calling this method.
*/
- static void generateHelpResponse(OperationContext* txn,
+ static void generateHelpResponse(OperationContext* opCtx,
const rpc::RequestInterface& request,
rpc::ReplyBuilderInterface* replyBuilder,
const Command& command);
@@ -400,7 +401,7 @@ public:
* already an active exception when this function is called, so there
* is little that can be done if it fails.
*/
- static void generateErrorResponse(OperationContext* txn,
+ static void generateErrorResponse(OperationContext* opCtx,
rpc::ReplyBuilderInterface* replyBuilder,
const DBException& exception,
const rpc::RequestInterface& request,
@@ -411,7 +412,7 @@ public:
* Generates a command error response. This overload of generateErrorResponse is intended
* to also add an operationTime.
*/
- static void generateErrorResponse(OperationContext* txn,
+ static void generateErrorResponse(OperationContext* opCtx,
rpc::ReplyBuilderInterface* replyBuilder,
const DBException& exception,
const rpc::RequestInterface& request,
@@ -424,7 +425,7 @@ public:
* a handle to the actual Command object. This can happen, for example, when the command
* is not found.
*/
- static void generateErrorResponse(OperationContext* txn,
+ static void generateErrorResponse(OperationContext* opCtx,
rpc::ReplyBuilderInterface* replyBuilder,
const DBException& exception,
const rpc::RequestInterface& request);
@@ -435,7 +436,7 @@ public:
* neccessary, for example, if there is
* an assertion hit while parsing the command.
*/
- static void generateErrorResponse(OperationContext* txn,
+ static void generateErrorResponse(OperationContext* opCtx,
rpc::ReplyBuilderInterface* replyBuilder,
const DBException& exception);
@@ -443,7 +444,7 @@ public:
* Records the error on to the OperationContext. This hook is needed because mongos
* does not have CurOp linked in to it.
*/
- static void registerError(OperationContext* txn, const DBException& exception);
+ static void registerError(OperationContext* opCtx, const DBException& exception);
/**
* Registers the implementation of the `registerError` function. This hook is needed because
@@ -459,7 +460,7 @@ public:
static bool isUserManagementCommand(const std::string& name);
/**
- * Checks to see if the client executing "txn" is authorized to run the given command with the
+ * Checks to see if the client executing "opCtx" is authorized to run the given command with the
* given parameters on the given named database.
*
* Returns Status::OK() if the command is authorized. Most likely returns
@@ -505,14 +506,14 @@ private:
ServerStatusMetricField<Counter64> _commandsExecutedMetric;
ServerStatusMetricField<Counter64> _commandsFailedMetric;
- friend void mongo::execCommandClient(OperationContext* txn,
+ friend void mongo::execCommandClient(OperationContext* opCtx,
Command* c,
int queryOptions,
const char* ns,
BSONObj& cmdObj,
BSONObjBuilder& result);
- friend void mongo::execCommandDatabase(OperationContext* txn,
+ friend void mongo::execCommandDatabase(OperationContext* opCtx,
Command* command,
const rpc::RequestInterface& request,
rpc::ReplyBuilderInterface* replyBuilder);
diff --git a/src/mongo/db/commands/apply_ops_cmd.cpp b/src/mongo/db/commands/apply_ops_cmd.cpp
index c252b8fd073..233fcc744e3 100644
--- a/src/mongo/db/commands/apply_ops_cmd.cpp
+++ b/src/mongo/db/commands/apply_ops_cmd.cpp
@@ -80,13 +80,13 @@ public:
}
- virtual Status checkAuthForOperation(OperationContext* txn,
+ virtual Status checkAuthForOperation(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj) {
- return checkAuthForApplyOpsCommand(txn, dbname, cmdObj);
+ return checkAuthForApplyOpsCommand(opCtx, dbname, cmdObj);
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -96,7 +96,7 @@ public:
boost::optional<DisableDocumentValidation> maybeDisableValidation;
if (shouldBypassDocumentValidationForCommand(cmdObj))
- maybeDisableValidation.emplace(txn);
+ maybeDisableValidation.emplace(opCtx);
if (cmdObj.firstElement().type() != Array) {
errmsg = "ops has to be an array";
@@ -116,14 +116,14 @@ public:
}
}
- auto client = txn->getClient();
+ auto client = opCtx->getClient();
auto lastOpAtOperationStart = repl::ReplClientInfo::forClient(client).getLastOp();
ScopeGuard lastOpSetterGuard =
MakeObjGuard(repl::ReplClientInfo::forClient(client),
&repl::ReplClientInfo::setLastOpToSystemLastOpTime,
- txn);
+ opCtx);
- auto applyOpsStatus = appendCommandStatus(result, applyOps(txn, dbname, cmdObj, &result));
+ auto applyOpsStatus = appendCommandStatus(result, applyOps(opCtx, dbname, cmdObj, &result));
if (repl::ReplClientInfo::forClient(client).getLastOp() != lastOpAtOperationStart) {
// If this operation has already generated a new lastOp, don't bother setting it
diff --git a/src/mongo/db/commands/apply_ops_cmd_common.cpp b/src/mongo/db/commands/apply_ops_cmd_common.cpp
index ebacbdfd476..55b463abc04 100644
--- a/src/mongo/db/commands/apply_ops_cmd_common.cpp
+++ b/src/mongo/db/commands/apply_ops_cmd_common.cpp
@@ -46,11 +46,11 @@ namespace mongo {
namespace {
-Status checkOperationAuthorization(OperationContext* txn,
+Status checkOperationAuthorization(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& oplogEntry,
bool alwaysUpsert) {
- AuthorizationSession* authSession = AuthorizationSession::get(txn->getClient());
+ AuthorizationSession* authSession = AuthorizationSession::get(opCtx->getClient());
BSONElement opTypeElem = oplogEntry["op"];
checkBSONType(BSONType::String, opTypeElem);
@@ -79,11 +79,11 @@ Status checkOperationAuthorization(OperationContext* txn,
return Status(ErrorCodes::FailedToParse, "Unrecognized command in op");
}
- return Command::checkAuthorization(command, txn, dbname, o);
+ return Command::checkAuthorization(command, opCtx, dbname, o);
}
if (opType == "i"_sd) {
- return authSession->checkAuthForInsert(txn, ns, o);
+ return authSession->checkAuthForInsert(opCtx, ns, o);
} else if (opType == "u"_sd) {
BSONElement o2Elem = oplogEntry["o2"];
checkBSONType(BSONType::Object, o2Elem);
@@ -97,10 +97,10 @@ Status checkOperationAuthorization(OperationContext* txn,
const bool upsert = b || alwaysUpsert;
- return authSession->checkAuthForUpdate(txn, ns, o, o2, upsert);
+ return authSession->checkAuthForUpdate(opCtx, ns, o, o2, upsert);
} else if (opType == "d"_sd) {
- return authSession->checkAuthForDelete(txn, ns, o);
+ return authSession->checkAuthForDelete(opCtx, ns, o);
} else if (opType == "db"_sd) {
// It seems that 'db' isn't used anymore. Require all actions to prevent casual use.
ActionSet allActions;
@@ -175,10 +175,10 @@ ApplyOpsValidity validateApplyOpsCommand(const BSONObj& cmdObj) {
return ApplyOpsValidity::kOk;
}
-Status checkAuthForApplyOpsCommand(OperationContext* txn,
+Status checkAuthForApplyOpsCommand(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj) {
- AuthorizationSession* authSession = AuthorizationSession::get(txn->getClient());
+ AuthorizationSession* authSession = AuthorizationSession::get(opCtx->getClient());
ApplyOpsValidity validity = validateApplyOpsCommand(cmdObj);
if (validity == ApplyOpsValidity::kNeedsSuperuser) {
@@ -193,7 +193,7 @@ Status checkAuthForApplyOpsCommand(OperationContext* txn,
boost::optional<DisableDocumentValidation> maybeDisableValidation;
if (shouldBypassDocumentValidationForCommand(cmdObj))
- maybeDisableValidation.emplace(txn);
+ maybeDisableValidation.emplace(opCtx);
const bool alwaysUpsert =
@@ -202,7 +202,7 @@ Status checkAuthForApplyOpsCommand(OperationContext* txn,
checkBSONType(BSONType::Array, cmdObj.firstElement());
for (const BSONElement& e : cmdObj.firstElement().Array()) {
checkBSONType(BSONType::Object, e);
- Status status = checkOperationAuthorization(txn, dbname, e.Obj(), alwaysUpsert);
+ Status status = checkOperationAuthorization(opCtx, dbname, e.Obj(), alwaysUpsert);
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/db/commands/apply_ops_cmd_common.h b/src/mongo/db/commands/apply_ops_cmd_common.h
index 443e862b798..f9fb5ec9823 100644
--- a/src/mongo/db/commands/apply_ops_cmd_common.h
+++ b/src/mongo/db/commands/apply_ops_cmd_common.h
@@ -39,7 +39,7 @@ class Status;
/**
* Returns Status::OK if the associated client is authorized to perform the command in cmdObj.
*/
-Status checkAuthForApplyOpsCommand(OperationContext* txn,
+Status checkAuthForApplyOpsCommand(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj);
diff --git a/src/mongo/db/commands/authentication_commands.cpp b/src/mongo/db/commands/authentication_commands.cpp
index 8de3931d529..95a92d89e3a 100644
--- a/src/mongo/db/commands/authentication_commands.cpp
+++ b/src/mongo/db/commands/authentication_commands.cpp
@@ -113,7 +113,7 @@ public:
virtual void addRequiredPrivileges(const std::string& dbname,
const BSONObj& cmdObj,
std::vector<Privilege>* out) {} // No auth required
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
@@ -151,7 +151,7 @@ void CmdAuthenticate::redactForLogging(mutablebson::Document* cmdObj) {
}
}
-bool CmdAuthenticate::run(OperationContext* txn,
+bool CmdAuthenticate::run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -167,7 +167,7 @@ bool CmdAuthenticate::run(OperationContext* txn,
mechanism = "MONGODB-CR";
}
UserName user;
- auto& sslPeerInfo = SSLPeerInfo::forSession(txn->getClient()->session());
+ auto& sslPeerInfo = SSLPeerInfo::forSession(opCtx->getClient()->session());
if (mechanism == "MONGODB-X509" && !cmdObj.hasField("user")) {
user = UserName(sslPeerInfo.subjectName, dbname);
} else {
@@ -182,7 +182,7 @@ bool CmdAuthenticate::run(OperationContext* txn,
user = internalSecurity.user->getName();
}
- Status status = _authenticate(txn, mechanism, user, cmdObj);
+ Status status = _authenticate(opCtx, mechanism, user, cmdObj);
audit::logAuthentication(Client::getCurrent(), mechanism, user, status.code());
if (!status.isOK()) {
if (!serverGlobalParams.quiet.load()) {
@@ -204,22 +204,22 @@ bool CmdAuthenticate::run(OperationContext* txn,
return true;
}
-Status CmdAuthenticate::_authenticate(OperationContext* txn,
+Status CmdAuthenticate::_authenticate(OperationContext* opCtx,
const std::string& mechanism,
const UserName& user,
const BSONObj& cmdObj) {
if (mechanism == "MONGODB-CR") {
- return _authenticateCR(txn, user, cmdObj);
+ return _authenticateCR(opCtx, user, cmdObj);
}
#ifdef MONGO_CONFIG_SSL
if (mechanism == "MONGODB-X509") {
- return _authenticateX509(txn, user, cmdObj);
+ return _authenticateX509(opCtx, user, cmdObj);
}
#endif
return Status(ErrorCodes::BadValue, "Unsupported mechanism: " + mechanism);
}
-Status CmdAuthenticate::_authenticateCR(OperationContext* txn,
+Status CmdAuthenticate::_authenticateCR(OperationContext* opCtx,
const UserName& user,
const BSONObj& cmdObj) {
if (user == internalSecurity.user->getName() &&
@@ -265,7 +265,7 @@ Status CmdAuthenticate::_authenticateCR(OperationContext* txn,
}
User* userObj;
- Status status = getGlobalAuthorizationManager()->acquireUser(txn, user, &userObj);
+ Status status = getGlobalAuthorizationManager()->acquireUser(opCtx, user, &userObj);
if (!status.isOK()) {
// Failure to find the privilege document indicates no-such-user, a fact that we do not
// wish to reveal to the client. So, we return AuthenticationFailed rather than passing
@@ -298,7 +298,7 @@ Status CmdAuthenticate::_authenticateCR(OperationContext* txn,
}
AuthorizationSession* authorizationSession = AuthorizationSession::get(Client::getCurrent());
- status = authorizationSession->addAndAuthorizeUser(txn, user);
+ status = authorizationSession->addAndAuthorizeUser(opCtx, user);
if (!status.isOK()) {
return status;
}
@@ -307,7 +307,7 @@ Status CmdAuthenticate::_authenticateCR(OperationContext* txn,
}
#ifdef MONGO_CONFIG_SSL
-Status CmdAuthenticate::_authenticateX509(OperationContext* txn,
+Status CmdAuthenticate::_authenticateX509(OperationContext* opCtx,
const UserName& user,
const BSONObj& cmdObj) {
if (!getSSLManager()) {
@@ -348,7 +348,7 @@ Status CmdAuthenticate::_authenticateX509(OperationContext* txn,
if (_isX509AuthDisabled) {
return Status(ErrorCodes::BadValue, _x509AuthenticationDisabledMessage);
}
- Status status = authorizationSession->addAndAuthorizeUser(txn, user);
+ Status status = authorizationSession->addAndAuthorizeUser(opCtx, user);
if (!status.isOK()) {
return status;
}
@@ -374,7 +374,7 @@ public:
return false;
}
CmdLogout() : Command("logout") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/db/commands/authentication_commands.h b/src/mongo/db/commands/authentication_commands.h
index 4b1caf54913..fddfcfdb1eb 100644
--- a/src/mongo/db/commands/authentication_commands.h
+++ b/src/mongo/db/commands/authentication_commands.h
@@ -55,7 +55,7 @@ public:
virtual void redactForLogging(mutablebson::Document* cmdObj);
CmdAuthenticate() : Command("authenticate") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -75,12 +75,12 @@ private:
* mechanism, and ProtocolError, indicating an error in the use of the authentication
* protocol.
*/
- Status _authenticate(OperationContext* txn,
+ Status _authenticate(OperationContext* opCtx,
const std::string& mechanism,
const UserName& user,
const BSONObj& cmdObj);
- Status _authenticateCR(OperationContext* txn, const UserName& user, const BSONObj& cmdObj);
- Status _authenticateX509(OperationContext* txn, const UserName& user, const BSONObj& cmdObj);
+ Status _authenticateCR(OperationContext* opCtx, const UserName& user, const BSONObj& cmdObj);
+ Status _authenticateX509(OperationContext* opCtx, const UserName& user, const BSONObj& cmdObj);
};
extern CmdAuthenticate cmdAuthenticate;
diff --git a/src/mongo/db/commands/clone.cpp b/src/mongo/db/commands/clone.cpp
index 60b9d031dc1..8548152087d 100644
--- a/src/mongo/db/commands/clone.cpp
+++ b/src/mongo/db/commands/clone.cpp
@@ -86,7 +86,7 @@ public:
return Status::OK();
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -94,7 +94,7 @@ public:
BSONObjBuilder& result) {
boost::optional<DisableDocumentValidation> maybeDisableValidation;
if (shouldBypassDocumentValidationForCommand(cmdObj)) {
- maybeDisableValidation.emplace(txn);
+ maybeDisableValidation.emplace(opCtx);
}
string from = cmdObj.getStringField("clone");
@@ -119,11 +119,11 @@ public:
set<string> clonedColls;
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), dbname, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dbXLock(opCtx->lockState(), dbname, MODE_X);
Cloner cloner;
- Status status = cloner.copyDb(txn, dbname, from, opts, &clonedColls);
+ Status status = cloner.copyDb(opCtx, dbname, from, opts, &clonedColls);
BSONArrayBuilder barr;
barr.append(clonedColls);
diff --git a/src/mongo/db/commands/clone_collection.cpp b/src/mongo/db/commands/clone_collection.cpp
index 512c9d7b737..d920ac62c05 100644
--- a/src/mongo/db/commands/clone_collection.cpp
+++ b/src/mongo/db/commands/clone_collection.cpp
@@ -103,7 +103,7 @@ public:
"is placed at the same db.collection (namespace) as the source.\n";
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -111,7 +111,7 @@ public:
BSONObjBuilder& result) {
boost::optional<DisableDocumentValidation> maybeDisableValidation;
if (shouldBypassDocumentValidationForCommand(cmdObj))
- maybeDisableValidation.emplace(txn);
+ maybeDisableValidation.emplace(opCtx);
string fromhost = cmdObj.getStringField("from");
if (fromhost.empty()) {
@@ -121,7 +121,7 @@ public:
{
HostAndPort h(fromhost);
- if (repl::isSelf(h, txn->getServiceContext())) {
+ if (repl::isSelf(h, opCtx->getServiceContext())) {
errmsg = "can't cloneCollection from self";
return false;
}
@@ -152,7 +152,7 @@ public:
cloner.setConnection(myconn.release());
- return cloner.copyCollection(txn, collection, query, errmsg, copyIndexes);
+ return cloner.copyCollection(opCtx, collection, query, errmsg, copyIndexes);
}
} cmdCloneCollection;
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index 373147da069..222e6d8887f 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -84,7 +84,7 @@ public:
out->push_back(Privilege(ResourcePattern::forExactNamespace(nss), targetActions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int,
@@ -118,11 +118,11 @@ public:
return false;
}
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, dbname, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ AutoGetDb autoDb(opCtx, dbname, MODE_X);
NamespaceString nss(dbname, to);
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, nss)) {
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, nss)) {
return appendCommandStatus(
result,
Status(ErrorCodes::NotMaster,
@@ -140,7 +140,7 @@ public:
}
Status status =
- cloneCollectionAsCapped(txn, db, from.toString(), to.toString(), size, temp);
+ cloneCollectionAsCapped(opCtx, db, from.toString(), to.toString(), size, temp);
return appendCommandStatus(result, status);
}
} cmdCloneCollectionAsCapped;
@@ -170,7 +170,7 @@ public:
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int,
@@ -184,7 +184,7 @@ public:
return false;
}
- return appendCommandStatus(result, convertToCapped(txn, nss, size));
+ return appendCommandStatus(result, convertToCapped(opCtx, nss, size));
}
} cmdConvertToCapped;
diff --git a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp
index b53fcda0b65..e93b94a5892 100644
--- a/src/mongo/db/commands/compact.cpp
+++ b/src/mongo/db/commands/compact.cpp
@@ -86,7 +86,7 @@ public:
}
CompactCmd() : Command("compact") {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& db,
BSONObj& cmdObj,
int,
@@ -144,13 +144,13 @@ public:
if (cmdObj.hasElement("validate"))
compactOptions.validateDocuments = cmdObj["validate"].trueValue();
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, db, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ AutoGetDb autoDb(opCtx, db, MODE_X);
Database* const collDB = autoDb.getDb();
Collection* collection = collDB ? collDB->getCollection(nss) : nullptr;
auto view =
- collDB && !collection ? collDB->getViewCatalog()->lookup(txn, nss.ns()) : nullptr;
+ collDB && !collection ? collDB->getViewCatalog()->lookup(opCtx, nss.ns()) : nullptr;
// If db/collection does not exist, short circuit and return.
if (!collDB || !collection) {
@@ -162,12 +162,12 @@ public:
result, {ErrorCodes::NamespaceNotFound, "collection does not exist"});
}
- OldClientContext ctx(txn, nss.ns());
+ OldClientContext ctx(opCtx, nss.ns());
BackgroundOperation::assertNoBgOpInProgForNs(nss.ns());
log() << "compact " << nss.ns() << " begin, options: " << compactOptions;
- StatusWith<CompactStats> status = collection->compact(txn, &compactOptions);
+ StatusWith<CompactStats> status = collection->compact(opCtx, &compactOptions);
if (!status.isOK())
return appendCommandStatus(result, status.getStatus());
diff --git a/src/mongo/db/commands/conn_pool_stats.cpp b/src/mongo/db/commands/conn_pool_stats.cpp
index 2cd000e7d30..1d23df5060d 100644
--- a/src/mongo/db/commands/conn_pool_stats.cpp
+++ b/src/mongo/db/commands/conn_pool_stats.cpp
@@ -69,7 +69,7 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string&,
mongo::BSONObj&,
int,
@@ -83,13 +83,13 @@ public:
result.appendNumber("numAScopedConnections", AScopedConnection::getNumConnections());
// Replication connections, if we have them.
- auto replCoord = repl::ReplicationCoordinator::get(txn);
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
if (replCoord && replCoord->isReplEnabled()) {
replCoord->appendConnectionStats(&stats);
}
// Sharding connections, if we have any.
- auto grid = Grid::get(txn);
+ auto grid = Grid::get(opCtx);
if (grid->shardRegistry()) {
grid->getExecutorPool()->appendConnectionStats(&stats);
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
diff --git a/src/mongo/db/commands/conn_pool_sync.cpp b/src/mongo/db/commands/conn_pool_sync.cpp
index cb9410d7619..e3318079efd 100644
--- a/src/mongo/db/commands/conn_pool_sync.cpp
+++ b/src/mongo/db/commands/conn_pool_sync.cpp
@@ -54,7 +54,7 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string&,
mongo::BSONObj&,
int,
diff --git a/src/mongo/db/commands/connection_status.cpp b/src/mongo/db/commands/connection_status.cpp
index e26e6b9d192..651a84327fa 100644
--- a/src/mongo/db/commands/connection_status.cpp
+++ b/src/mongo/db/commands/connection_status.cpp
@@ -55,7 +55,7 @@ public:
h << "Returns connection-specific information such as logged-in users and their roles";
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
diff --git a/src/mongo/db/commands/copydb.cpp b/src/mongo/db/commands/copydb.cpp
index 00f26f8bd4d..b13066949df 100644
--- a/src/mongo/db/commands/copydb.cpp
+++ b/src/mongo/db/commands/copydb.cpp
@@ -114,7 +114,7 @@ public:
<< "[, slaveOk: <bool>, username: <username>, nonce: <nonce>, key: <key>]}";
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -122,7 +122,7 @@ public:
BSONObjBuilder& result) {
boost::optional<DisableDocumentValidation> maybeDisableValidation;
if (shouldBypassDocumentValidationForCommand(cmdObj))
- maybeDisableValidation.emplace(txn);
+ maybeDisableValidation.emplace(opCtx);
string fromhost = cmdObj.getStringField("fromhost");
bool fromSelf = fromhost.empty();
@@ -171,7 +171,7 @@ public:
string nonce = cmdObj.getStringField("nonce");
string key = cmdObj.getStringField("key");
- auto& authConn = CopyDbAuthConnection::forClient(txn->getClient());
+ auto& authConn = CopyDbAuthConnection::forClient(opCtx->getClient());
if (!username.empty() && !nonce.empty() && !key.empty()) {
uassert(13008, "must call copydbgetnonce first", authConn.get());
@@ -226,13 +226,13 @@ public:
if (fromSelf) {
// SERVER-4328 todo lock just the two db's not everything for the fromself case
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- uassertStatusOK(cloner.copyDb(txn, todb, fromhost, cloneOptions, NULL));
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
+ uassertStatusOK(cloner.copyDb(opCtx, todb, fromhost, cloneOptions, NULL));
} else {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock lk(txn->lockState(), todb, MODE_X);
- uassertStatusOK(cloner.copyDb(txn, todb, fromhost, cloneOptions, NULL));
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock lk(opCtx->lockState(), todb, MODE_X);
+ uassertStatusOK(cloner.copyDb(opCtx, todb, fromhost, cloneOptions, NULL));
}
return true;
diff --git a/src/mongo/db/commands/copydb_start_commands.cpp b/src/mongo/db/commands/copydb_start_commands.cpp
index 3dc9769024a..f7a8949ca42 100644
--- a/src/mongo/db/commands/copydb_start_commands.cpp
+++ b/src/mongo/db/commands/copydb_start_commands.cpp
@@ -96,7 +96,7 @@ public:
help << "usage: {copydbgetnonce: 1, fromhost: <hostname>}";
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
@@ -112,7 +112,7 @@ public:
const ConnectionString cs(uassertStatusOK(ConnectionString::parse(fromhost)));
- auto& authConn = CopyDbAuthConnection::forClient(txn->getClient());
+ auto& authConn = CopyDbAuthConnection::forClient(opCtx->getClient());
authConn.reset(cs.connect(StringData(), errmsg));
if (!authConn) {
return false;
@@ -170,7 +170,7 @@ public:
"from secure server\n";
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
@@ -209,7 +209,7 @@ public:
return false;
}
- auto& authConn = CopyDbAuthConnection::forClient(txn->getClient());
+ auto& authConn = CopyDbAuthConnection::forClient(opCtx->getClient());
authConn.reset(cs.connect(StringData(), errmsg));
if (!authConn.get()) {
return false;
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index 8c1bb515738..7ecbe2f08ea 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -101,7 +101,7 @@ public:
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
}
- virtual Status explain(OperationContext* txn,
+ virtual Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -122,7 +122,7 @@ public:
}
// Acquire the db read lock.
- AutoGetCollectionOrViewForRead ctx(txn, request.getValue().getNs());
+ AutoGetCollectionOrViewForRead ctx(opCtx, request.getValue().getNs());
Collection* collection = ctx.getCollection();
if (ctx.getView()) {
@@ -135,7 +135,7 @@ public:
std::string errmsg;
(void)Command::findCommand("aggregate")
- ->run(txn, dbname, viewAggregation.getValue(), 0, errmsg, *out);
+ ->run(opCtx, dbname, viewAggregation.getValue(), 0, errmsg, *out);
return Status::OK();
}
@@ -143,7 +143,7 @@ public:
// version on initial entry into count.
RangePreserver preserver(collection);
- auto statusWithPlanExecutor = getExecutorCount(txn,
+ auto statusWithPlanExecutor = getExecutorCount(opCtx,
collection,
request.getValue(),
true, // explain
@@ -158,7 +158,7 @@ public:
return Status::OK();
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -180,7 +180,7 @@ public:
"http://dochub.mongodb.org/core/3.4-feature-compatibility."));
}
- AutoGetCollectionOrViewForRead ctx(txn, request.getValue().getNs());
+ AutoGetCollectionOrViewForRead ctx(opCtx, request.getValue().getNs());
Collection* collection = ctx.getCollection();
if (ctx.getView()) {
@@ -193,7 +193,7 @@ public:
BSONObjBuilder aggResult;
(void)Command::findCommand("aggregate")
- ->run(txn, dbname, viewAggregation.getValue(), options, errmsg, aggResult);
+ ->run(opCtx, dbname, viewAggregation.getValue(), options, errmsg, aggResult);
if (ResolvedView::isResolvedViewErrorResponse(aggResult.asTempObj())) {
result.appendElements(aggResult.obj());
@@ -212,7 +212,7 @@ public:
// version on initial entry into count.
RangePreserver preserver(collection);
- auto statusWithPlanExecutor = getExecutorCount(txn,
+ auto statusWithPlanExecutor = getExecutorCount(opCtx,
collection,
request.getValue(),
false, // !explain
@@ -224,9 +224,9 @@ public:
unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
// Store the plan summary string in CurOp.
- auto curOp = CurOp::get(txn);
+ auto curOp = CurOp::get(opCtx);
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
}
@@ -238,7 +238,7 @@ public:
PlanSummaryStats summaryStats;
Explain::getSummaryStats(*exec, &summaryStats);
if (collection) {
- collection->infoCache()->notifyOfQuery(txn, summaryStats.indexesUsed);
+ collection->infoCache()->notifyOfQuery(opCtx, summaryStats.indexesUsed);
}
curOp->debug().setPlanSummaryMetrics(summaryStats);
diff --git a/src/mongo/db/commands/cpuprofile.cpp b/src/mongo/db/commands/cpuprofile.cpp
index 24fbd034b81..608a626aa92 100644
--- a/src/mongo/db/commands/cpuprofile.cpp
+++ b/src/mongo/db/commands/cpuprofile.cpp
@@ -103,7 +103,7 @@ class CpuProfilerStartCommand : public CpuProfilerCommand {
public:
CpuProfilerStartCommand() : CpuProfilerCommand(commandName) {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
std::string const& db,
BSONObj& cmdObj,
int options,
@@ -120,7 +120,7 @@ class CpuProfilerStopCommand : public CpuProfilerCommand {
public:
CpuProfilerStopCommand() : CpuProfilerCommand(commandName) {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
std::string const& db,
BSONObj& cmdObj,
int options,
@@ -133,16 +133,16 @@ public:
char const* const CpuProfilerStartCommand::commandName = "_cpuProfilerStart";
char const* const CpuProfilerStopCommand::commandName = "_cpuProfilerStop";
-bool CpuProfilerStartCommand::run(OperationContext* txn,
+bool CpuProfilerStartCommand::run(OperationContext* opCtx,
std::string const& db,
BSONObj& cmdObj,
int options,
std::string& errmsg,
BSONObjBuilder& result) {
// The DB lock here is just so we have IX on the global lock in order to prevent shutdown
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), db, MODE_X);
- OldClientContext ctx(txn, db, false /* no shard version checking */);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dbXLock(opCtx->lockState(), db, MODE_X);
+ OldClientContext ctx(opCtx, db, false /* no shard version checking */);
std::string profileFilename = cmdObj[commandName]["profileFilename"].String();
if (!::ProfilerStart(profileFilename.c_str())) {
@@ -152,16 +152,16 @@ bool CpuProfilerStartCommand::run(OperationContext* txn,
return true;
}
-bool CpuProfilerStopCommand::run(OperationContext* txn,
+bool CpuProfilerStopCommand::run(OperationContext* opCtx,
std::string const& db,
BSONObj& cmdObj,
int options,
std::string& errmsg,
BSONObjBuilder& result) {
// The DB lock here is just so we have IX on the global lock in order to prevent shutdown
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), db, MODE_X);
- OldClientContext ctx(txn, db, false /* no shard version checking */);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dbXLock(opCtx->lockState(), db, MODE_X);
+ OldClientContext ctx(opCtx, db, false /* no shard version checking */);
::ProfilerStop();
return true;
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 681370348ce..8c887502d62 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -156,12 +156,12 @@ StatusWith<std::vector<BSONObj>> parseAndValidateIndexSpecs(
* form stored in the IndexCatalog should any of these indexes already exist.
*/
StatusWith<std::vector<BSONObj>> resolveCollectionDefaultProperties(
- OperationContext* txn, const Collection* collection, std::vector<BSONObj> indexSpecs) {
+ OperationContext* opCtx, const Collection* collection, std::vector<BSONObj> indexSpecs) {
std::vector<BSONObj> indexSpecsWithDefaults = std::move(indexSpecs);
for (size_t i = 0, numIndexSpecs = indexSpecsWithDefaults.size(); i < numIndexSpecs; ++i) {
auto indexSpecStatus = index_key_validate::validateIndexSpecCollation(
- txn, indexSpecsWithDefaults[i], collection->getDefaultCollator());
+ opCtx, indexSpecsWithDefaults[i], collection->getDefaultCollator());
if (!indexSpecStatus.isOK()) {
return indexSpecStatus.getStatus();
}
@@ -171,7 +171,7 @@ StatusWith<std::vector<BSONObj>> resolveCollectionDefaultProperties(
indexSpec[IndexDescriptor::kKeyPatternFieldName].Obj())) {
std::unique_ptr<CollatorInterface> indexCollator;
if (auto collationElem = indexSpec[IndexDescriptor::kCollationFieldName]) {
- auto collatorStatus = CollatorFactoryInterface::get(txn->getServiceContext())
+ auto collatorStatus = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(collationElem.Obj());
// validateIndexSpecCollation() should have checked that the index collation spec is
// valid.
@@ -225,7 +225,7 @@ public:
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -246,56 +246,56 @@ public:
// now we know we have to create index(es)
// Note: createIndexes command does not currently respect shard versioning.
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbLock(txn->lockState(), ns.db(), MODE_X);
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, ns)) {
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dbLock(opCtx->lockState(), ns.db(), MODE_X);
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, ns)) {
return appendCommandStatus(
result,
Status(ErrorCodes::NotMaster,
str::stream() << "Not primary while creating indexes in " << ns.ns()));
}
- Database* db = dbHolder().get(txn, ns.db());
+ Database* db = dbHolder().get(opCtx, ns.db());
if (!db) {
- db = dbHolder().openDb(txn, ns.db());
+ db = dbHolder().openDb(opCtx, ns.db());
}
Collection* collection = db->getCollection(ns.ns());
if (collection) {
result.appendBool("createdCollectionAutomatically", false);
} else {
- if (db->getViewCatalog()->lookup(txn, ns.ns())) {
+ if (db->getViewCatalog()->lookup(opCtx, ns.ns())) {
errmsg = "Cannot create indexes on a view";
return appendCommandStatus(result, {ErrorCodes::CommandNotSupportedOnView, errmsg});
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
- collection = db->createCollection(txn, ns.ns(), CollectionOptions());
+ WriteUnitOfWork wunit(opCtx);
+ collection = db->createCollection(opCtx, ns.ns(), CollectionOptions());
invariant(collection);
wunit.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, kCommandName, ns.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, kCommandName, ns.ns());
result.appendBool("createdCollectionAutomatically", true);
}
auto indexSpecsWithDefaults =
- resolveCollectionDefaultProperties(txn, collection, std::move(specs));
+ resolveCollectionDefaultProperties(opCtx, collection, std::move(specs));
if (!indexSpecsWithDefaults.isOK()) {
return appendCommandStatus(result, indexSpecsWithDefaults.getStatus());
}
specs = std::move(indexSpecsWithDefaults.getValue());
- const int numIndexesBefore = collection->getIndexCatalog()->numIndexesTotal(txn);
+ const int numIndexesBefore = collection->getIndexCatalog()->numIndexesTotal(opCtx);
result.append("numIndexesBefore", numIndexesBefore);
- auto client = txn->getClient();
+ auto client = opCtx->getClient();
ScopeGuard lastOpSetterGuard =
MakeObjGuard(repl::ReplClientInfo::forClient(client),
&repl::ReplClientInfo::setLastOpToSystemLastOpTime,
- txn);
+ opCtx);
- MultiIndexBlock indexer(txn, collection);
+ MultiIndexBlock indexer(opCtx, collection);
indexer.allowBackgroundBuilding();
indexer.allowInterruption();
@@ -315,7 +315,7 @@ public:
for (size_t i = 0; i < specs.size(); i++) {
const BSONObj& spec = specs[i];
if (spec["unique"].trueValue()) {
- status = checkUniqueIndexConstraints(txn, ns.ns(), spec["key"].Obj());
+ status = checkUniqueIndexConstraints(opCtx, ns.ns(), spec["key"].Obj());
if (!status.isOK()) {
return appendCommandStatus(result, status);
@@ -327,14 +327,14 @@ public:
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
indexInfoObjs = uassertStatusOK(indexer.init(specs));
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, kCommandName, ns.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, kCommandName, ns.ns());
// If we're a background index, replace exclusive db lock with an intent lock, so that
// other readers and writers can proceed during this phase.
if (indexer.getBuildInBackground()) {
- txn->recoveryUnit()->abandonSnapshot();
+ opCtx->recoveryUnit()->abandonSnapshot();
dbLock.relockWithMode(MODE_IX);
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, ns)) {
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, ns)) {
return appendCommandStatus(
result,
Status(ErrorCodes::NotMaster,
@@ -344,7 +344,7 @@ public:
}
try {
- Lock::CollectionLock colLock(txn->lockState(), ns.ns(), MODE_IX);
+ Lock::CollectionLock colLock(opCtx->lockState(), ns.ns(), MODE_IX);
uassertStatusOK(indexer.insertAllDocumentsInCollection());
} catch (const DBException& e) {
invariant(e.getCode() != ErrorCodes::WriteConflict);
@@ -354,9 +354,9 @@ public:
try {
// This function cannot throw today, but we will preemptively prepare for
// that day, to avoid data corruption due to lack of index cleanup.
- txn->recoveryUnit()->abandonSnapshot();
+ opCtx->recoveryUnit()->abandonSnapshot();
dbLock.relockWithMode(MODE_X);
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, ns)) {
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, ns)) {
return appendCommandStatus(
result,
Status(ErrorCodes::NotMaster,
@@ -374,33 +374,33 @@ public:
}
// Need to return db lock back to exclusive, to complete the index build.
if (indexer.getBuildInBackground()) {
- txn->recoveryUnit()->abandonSnapshot();
+ opCtx->recoveryUnit()->abandonSnapshot();
dbLock.relockWithMode(MODE_X);
uassert(ErrorCodes::NotMaster,
str::stream() << "Not primary while completing index build in " << dbname,
- repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, ns));
+ repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, ns));
- Database* db = dbHolder().get(txn, ns.db());
+ Database* db = dbHolder().get(opCtx, ns.db());
uassert(28551, "database dropped during index build", db);
uassert(28552, "collection dropped during index build", db->getCollection(ns.ns()));
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
indexer.commit();
for (auto&& infoObj : indexInfoObjs) {
std::string systemIndexes = ns.getSystemIndexesCollection();
getGlobalServiceContext()->getOpObserver()->onCreateIndex(
- txn, systemIndexes, infoObj, false);
+ opCtx, systemIndexes, infoObj, false);
}
wunit.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, kCommandName, ns.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, kCommandName, ns.ns());
- result.append("numIndexesAfter", collection->getIndexCatalog()->numIndexesTotal(txn));
+ result.append("numIndexesAfter", collection->getIndexCatalog()->numIndexesTotal(opCtx));
lastOpSetterGuard.Dismiss();
@@ -408,12 +408,12 @@ public:
}
private:
- static Status checkUniqueIndexConstraints(OperationContext* txn,
+ static Status checkUniqueIndexConstraints(OperationContext* opCtx,
StringData ns,
const BSONObj& newIdxKey) {
- invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(ns, MODE_X));
- auto metadata(CollectionShardingState::get(txn, ns.toString())->getMetadata());
+ auto metadata(CollectionShardingState::get(opCtx, ns.toString())->getMetadata());
if (metadata) {
ShardKeyPattern shardKeyPattern(metadata->getKeyPattern());
if (!shardKeyPattern.isUniqueIndexCompatible(newIdxKey)) {
diff --git a/src/mongo/db/commands/current_op.cpp b/src/mongo/db/commands/current_op.cpp
index c696533ef7a..c5f05e314ae 100644
--- a/src/mongo/db/commands/current_op.cpp
+++ b/src/mongo/db/commands/current_op.cpp
@@ -85,7 +85,7 @@ public:
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& db,
BSONObj& cmdObj,
int options,
@@ -117,14 +117,14 @@ public:
std::vector<BSONObj> inprogInfos;
BSONArrayBuilder inprogBuilder(result.subarrayStart("inprog"));
- for (ServiceContext::LockedClientsCursor cursor(txn->getClient()->getServiceContext());
+ for (ServiceContext::LockedClientsCursor cursor(opCtx->getClient()->getServiceContext());
Client* client = cursor.next();) {
invariant(client);
stdx::lock_guard<Client> lk(*client);
if (ownOpsOnly &&
- !AuthorizationSession::get(txn->getClient())->isCoauthorizedWithClient(client)) {
+ !AuthorizationSession::get(opCtx->getClient())->isCoauthorizedWithClient(client)) {
continue;
}
@@ -183,7 +183,7 @@ public:
// don't have a collection, we pass in a fake collection name (and this is okay,
// because $where parsing only relies on the database part of the namespace).
const NamespaceString fakeNS(db, "$dummyNamespaceForCurrop");
- const Matcher matcher(filter, ExtensionsCallbackReal(txn, &fakeNS), nullptr);
+ const Matcher matcher(filter, ExtensionsCallbackReal(opCtx, &fakeNS), nullptr);
for (const auto& info : inprogInfos) {
if (matcher.matches(info)) {
diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index 9ba85e79f9f..7b4879425a3 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -121,8 +121,8 @@ using std::stringstream;
using std::unique_ptr;
namespace {
-void registerErrorImpl(OperationContext* txn, const DBException& exception) {
- CurOp::get(txn)->debug().exceptionInfo = exception.getInfo();
+void registerErrorImpl(OperationContext* opCtx, const DBException& exception) {
+ CurOp::get(opCtx)->debug().exceptionInfo = exception.getInfo();
}
MONGO_INITIALIZER(InitializeRegisterErrorHandler)(InitializerContext* const) {
@@ -130,18 +130,18 @@ MONGO_INITIALIZER(InitializeRegisterErrorHandler)(InitializerContext* const) {
return Status::OK();
}
/**
- * For replica set members it returns the last known op time from txn. Otherwise will return
+ * For replica set members it returns the last known op time from opCtx. Otherwise will return
* uninitialized logical time.
*/
-LogicalTime _getClientOperationTime(OperationContext* txn) {
+LogicalTime _getClientOperationTime(OperationContext* opCtx) {
repl::ReplicationCoordinator* replCoord =
- repl::ReplicationCoordinator::get(txn->getClient()->getServiceContext());
+ repl::ReplicationCoordinator::get(opCtx->getClient()->getServiceContext());
const bool isReplSet =
replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet;
LogicalTime operationTime;
if (isReplSet) {
operationTime = LogicalTime(
- repl::ReplClientInfo::forClient(txn->getClient()).getLastOp().getTimestamp());
+ repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp().getTimestamp());
}
return operationTime;
}
@@ -159,7 +159,7 @@ public:
<< "N to wait N seconds for other members to catch up.";
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -173,7 +173,7 @@ public:
}
Status status = repl::getGlobalReplicationCoordinator()->stepDown(
- txn, force, Seconds(timeoutSecs), Seconds(120));
+ opCtx, force, Seconds(timeoutSecs), Seconds(120));
if (!status.isOK() && status.code() != ErrorCodes::NotMaster) { // ignore not master
return appendCommandStatus(result, status);
}
@@ -209,7 +209,7 @@ public:
CmdDropDatabase() : Command("dropDatabase") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -239,7 +239,7 @@ public:
result, Status(ErrorCodes::IllegalOperation, "have to pass 1 as db parameter"));
}
- Status status = dropDatabase(txn, dbname);
+ Status status = dropDatabase(opCtx, dbname);
if (status == ErrorCodes::NamespaceNotFound) {
return appendCommandStatus(result, Status::OK());
}
@@ -278,7 +278,7 @@ public:
CmdRepairDatabase() : Command("repairDatabase") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -291,9 +291,9 @@ public:
}
// Closing a database requires a global lock.
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- if (!dbHolder().get(txn, dbname)) {
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
+ if (!dbHolder().get(opCtx, dbname)) {
// If the name doesn't make an exact match, check for a case insensitive match.
std::set<std::string> otherCasing = dbHolder().getNamesWithConflictingCasing(dbname);
if (otherCasing.empty()) {
@@ -310,9 +310,9 @@ public:
// TODO (Kal): OldClientContext legacy, needs to be removed
{
- CurOp::get(txn)->ensureStarted();
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setNS_inlock(dbname);
+ CurOp::get(opCtx)->ensureStarted();
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setNS_inlock(dbname);
}
log() << "repairDatabase " << dbname;
@@ -324,14 +324,14 @@ public:
bool backupOriginalFiles = e.isBoolean() && e.boolean();
StorageEngine* engine = getGlobalServiceContext()->getGlobalStorageEngine();
- bool shouldReplicateWrites = txn->writesAreReplicated();
- txn->setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, txn, shouldReplicateWrites);
- Status status =
- repairDatabase(txn, engine, dbname, preserveClonedFilesOnFailure, backupOriginalFiles);
+ bool shouldReplicateWrites = opCtx->writesAreReplicated();
+ opCtx->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, opCtx, shouldReplicateWrites);
+ Status status = repairDatabase(
+ opCtx, engine, dbname, preserveClonedFilesOnFailure, backupOriginalFiles);
// Open database before returning
- dbHolder().openDb(txn, dbname);
+ dbHolder().openDb(opCtx, dbname);
return appendCommandStatus(result, status);
}
} cmdRepairDatabase;
@@ -385,7 +385,7 @@ public:
CmdProfile() : Command("profile") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -403,8 +403,8 @@ public:
Status status = Status::OK();
- ScopedTransaction transaction(txn, transactionMode);
- AutoGetDb ctx(txn, dbname, dbMode);
+ ScopedTransaction transaction(opCtx, transactionMode);
+ AutoGetDb ctx(opCtx, dbname, dbMode);
Database* db = ctx.getDb();
result.append("was", db ? db->getProfilingLevel() : serverGlobalParams.defaultProfile);
@@ -415,9 +415,9 @@ public:
if (!db) {
// When setting the profiling level, create the database if it didn't already exist.
// When just reading the profiling level, we do not create the database.
- db = dbHolder().openDb(txn, dbname);
+ db = dbHolder().openDb(opCtx, dbname);
}
- status = db->setProfilingLevel(txn, profilingLevel);
+ status = db->setProfilingLevel(opCtx, profilingLevel);
}
const BSONElement slow = cmdObj["slowms"];
@@ -470,7 +470,7 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -483,14 +483,14 @@ public:
// This doesn't look like it requires exclusive DB lock, because it uses its own diag
// locking, but originally the lock was set to be WRITE, so preserving the behaviour.
//
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), dbname, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dbXLock(opCtx->lockState(), dbname, MODE_X);
// TODO (Kal): OldClientContext legacy, needs to be removed
{
- CurOp::get(txn)->ensureStarted();
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setNS_inlock(dbname);
+ CurOp::get(opCtx)->ensureStarted();
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setNS_inlock(dbname);
}
int was = _diaglog.setLevel(cmdObj.firstElement().numberInt());
@@ -530,7 +530,7 @@ public:
return true;
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -550,7 +550,7 @@ public:
return false;
}
- return appendCommandStatus(result, dropCollection(txn, nsToDrop, result));
+ return appendCommandStatus(result, dropCollection(opCtx, nsToDrop, result));
}
} cmdDrop;
@@ -582,7 +582,7 @@ public:
return AuthorizationSession::get(client)->checkAuthForCreate(nss, cmdObj);
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -649,7 +649,7 @@ public:
{ErrorCodes::TypeMismatch,
str::stream() << "'collation' has to be a document: " << collationElem});
}
- auto collatorStatus = CollatorFactoryInterface::get(txn->getServiceContext())
+ auto collatorStatus = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(collationElem.Obj());
if (!collatorStatus.isOK()) {
return appendCommandStatus(result, collatorStatus.getStatus());
@@ -657,10 +657,10 @@ public:
defaultCollator = std::move(collatorStatus.getValue());
}
idIndexSpec = uassertStatusOK(index_key_validate::validateIndexSpecCollation(
- txn, idIndexSpec, defaultCollator.get()));
+ opCtx, idIndexSpec, defaultCollator.get()));
std::unique_ptr<CollatorInterface> idIndexCollator;
if (auto collationElem = idIndexSpec["collation"]) {
- auto collatorStatus = CollatorFactoryInterface::get(txn->getServiceContext())
+ auto collatorStatus = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(collationElem.Obj());
// validateIndexSpecCollation() should have checked that the _id index collation
// spec is valid.
@@ -677,12 +677,12 @@ public:
// Remove "idIndex" field from command.
auto resolvedCmdObj = cmdObj.removeField("idIndex");
- return appendCommandStatus(result,
- createCollection(txn, dbname, resolvedCmdObj, idIndexSpec));
+ return appendCommandStatus(
+ result, createCollection(opCtx, dbname, resolvedCmdObj, idIndexSpec));
}
BSONObj idIndexSpec;
- return appendCommandStatus(result, createCollection(txn, dbname, cmdObj, idIndexSpec));
+ return appendCommandStatus(result, createCollection(opCtx, dbname, cmdObj, idIndexSpec));
}
} cmdCreate;
@@ -724,7 +724,7 @@ public:
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), ActionType::find));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int,
@@ -763,7 +763,7 @@ public:
qr->setSort(sort);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn, std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx, std::move(qr), ExtensionsCallbackDisallowExtensions());
if (!statusWithCQ.isOK()) {
uasserted(17240, "Can't canonicalize query " + query.toString());
return 0;
@@ -773,10 +773,10 @@ public:
// Check shard version at startup.
// This will throw before we've done any work if shard version is outdated
// We drop and re-acquire these locks every document because md5'ing is expensive
- unique_ptr<AutoGetCollectionForRead> ctx(new AutoGetCollectionForRead(txn, nss));
+ unique_ptr<AutoGetCollectionForRead> ctx(new AutoGetCollectionForRead(opCtx, nss));
Collection* coll = ctx->getCollection();
- auto statusWithPlanExecutor = getExecutor(txn,
+ auto statusWithPlanExecutor = getExecutor(opCtx,
coll,
std::move(cq),
PlanExecutor::YIELD_MANUAL,
@@ -801,7 +801,7 @@ public:
break; // skipped chunk is probably on another shard
}
log() << "should have chunk: " << n << " have:" << myn;
- dumpChunks(txn, nss.ns(), query, sort);
+ dumpChunks(opCtx, nss.ns(), query, sort);
uassert(10040, "chunks out of order", n == myn);
}
@@ -819,7 +819,7 @@ public:
try {
// RELOCKED
- ctx.reset(new AutoGetCollectionForRead(txn, nss));
+ ctx.reset(new AutoGetCollectionForRead(opCtx, nss));
} catch (const SendStaleConfigException& ex) {
LOG(1) << "chunk metadata changed during filemd5, will retarget and continue";
break;
@@ -850,15 +850,15 @@ public:
result.append("numChunks", n);
result.append("md5", digestToString(d));
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "filemd5", dbname);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "filemd5", dbname);
return true;
}
- void dumpChunks(OperationContext* txn,
+ void dumpChunks(OperationContext* opCtx,
const string& ns,
const BSONObj& query,
const BSONObj& sort) {
- DBDirectClient client(txn);
+ DBDirectClient client(opCtx);
Query q(query);
q.sort(sort);
unique_ptr<DBClientCursor> c = client.query(ns, q);
@@ -905,7 +905,7 @@ public:
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int,
@@ -919,12 +919,12 @@ public:
BSONObj keyPattern = jsobj.getObjectField("keyPattern");
bool estimate = jsobj["estimate"].trueValue();
- AutoGetCollectionForRead ctx(txn, NamespaceString(ns));
+ AutoGetCollectionForRead ctx(opCtx, NamespaceString(ns));
Collection* collection = ctx.getCollection();
long long numRecords = 0;
if (collection) {
- numRecords = collection->numRecords(txn);
+ numRecords = collection->numRecords(opCtx);
}
if (numRecords == 0) {
@@ -939,12 +939,13 @@ public:
unique_ptr<PlanExecutor> exec;
if (min.isEmpty() && max.isEmpty()) {
if (estimate) {
- result.appendNumber("size", static_cast<long long>(collection->dataSize(txn)));
+ result.appendNumber("size", static_cast<long long>(collection->dataSize(opCtx)));
result.appendNumber("numObjects", numRecords);
result.append("millis", timer.millis());
return 1;
}
- exec = InternalPlanner::collectionScan(txn, ns, collection, PlanExecutor::YIELD_MANUAL);
+ exec =
+ InternalPlanner::collectionScan(opCtx, ns, collection, PlanExecutor::YIELD_MANUAL);
} else if (min.isEmpty() || max.isEmpty()) {
errmsg = "only one of min or max specified";
return false;
@@ -955,7 +956,7 @@ public:
}
IndexDescriptor* idx =
- collection->getIndexCatalog()->findShardKeyPrefixedIndex(txn,
+ collection->getIndexCatalog()->findShardKeyPrefixedIndex(opCtx,
keyPattern,
true); // requireSingleKey
@@ -968,7 +969,7 @@ public:
min = Helpers::toKeyFormat(kp.extendRangeBound(min, false));
max = Helpers::toKeyFormat(kp.extendRangeBound(max, false));
- exec = InternalPlanner::indexScan(txn,
+ exec = InternalPlanner::indexScan(opCtx,
collection,
idx,
min,
@@ -977,7 +978,7 @@ public:
PlanExecutor::YIELD_MANUAL);
}
- long long avgObjSize = collection->dataSize(txn) / numRecords;
+ long long avgObjSize = collection->dataSize(opCtx) / numRecords;
long long maxSize = jsobj["maxSize"].numberLong();
long long maxObjects = jsobj["maxObjects"].numberLong();
@@ -992,7 +993,7 @@ public:
if (estimate)
size += avgObjSize;
else
- size += collection->getRecordStore()->dataFor(txn, loc).size();
+ size += collection->getRecordStore()->dataFor(opCtx, loc).size();
numObjects++;
@@ -1049,7 +1050,7 @@ public:
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int,
@@ -1063,7 +1064,7 @@ public:
}
result.append("ns", nss.ns());
- Status status = appendCollectionStorageStats(txn, nss, jsobj, &result);
+ Status status = appendCollectionStorageStats(opCtx, nss, jsobj, &result);
if (!status.isOK()) {
errmsg = status.reason();
return false;
@@ -1098,14 +1099,14 @@ public:
return AuthorizationSession::get(client)->checkAuthForCollMod(nss, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int,
string& errmsg,
BSONObjBuilder& result) {
const NamespaceString nss(parseNsCollectionRequired(dbname, jsobj));
- return appendCommandStatus(result, collMod(txn, nss, jsobj, &result));
+ return appendCommandStatus(result, collMod(opCtx, nss, jsobj, &result));
}
} collectionModCommand;
@@ -1134,7 +1135,7 @@ public:
out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int,
@@ -1159,16 +1160,16 @@ public:
// TODO (Kal): OldClientContext legacy, needs to be removed
{
- CurOp::get(txn)->ensureStarted();
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setNS_inlock(dbname);
+ CurOp::get(opCtx)->ensureStarted();
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setNS_inlock(dbname);
}
// We lock the entire database in S-mode in order to ensure that the contents will not
// change for the stats snapshot. This might be unnecessary and if it becomes a
// performance issue, we can take IS lock and then lock collection-by-collection.
- ScopedTransaction scopedXact(txn, MODE_IS);
- AutoGetDb autoDb(txn, ns, MODE_S);
+ ScopedTransaction scopedXact(opCtx, MODE_IS);
+ AutoGetDb autoDb(opCtx, ns, MODE_S);
result.append("db", ns);
@@ -1191,12 +1192,12 @@ public:
result.appendNumber("fileSize", 0);
} else {
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
// TODO: OldClientContext legacy, needs to be removed
- CurOp::get(txn)->enter_inlock(dbname.c_str(), db->getProfilingLevel());
+ CurOp::get(opCtx)->enter_inlock(dbname.c_str(), db->getProfilingLevel());
}
- db->getStats(txn, &result, scale);
+ db->getStats(opCtx, &result, scale);
}
return true;
@@ -1220,13 +1221,13 @@ public:
virtual void addRequiredPrivileges(const std::string& dbname,
const BSONObj& cmdObj,
std::vector<Privilege>* out) {} // No auth required
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
string& errmsg,
BSONObjBuilder& result) {
- result << "you" << txn->getClient()->clientAddress(true /*includePort*/);
+ result << "you" << opCtx->getClient()->clientAddress(true /*includePort*/);
return true;
}
} cmdWhatsMyUri;
@@ -1247,7 +1248,7 @@ public:
return Status::OK();
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -1299,10 +1300,10 @@ const std::array<StringData, 4> neededFieldNames{QueryRequest::cmdOptionMaxTimeM
QueryRequest::queryOptionMaxTimeMS};
} // namespace
-void appendOpTimeMetadata(OperationContext* txn,
+void appendOpTimeMetadata(OperationContext* opCtx,
const rpc::RequestInterface& request,
BSONObjBuilder* metadataBob) {
- const bool isShardingAware = ShardingState::get(txn)->enabled();
+ const bool isShardingAware = ShardingState::get(opCtx)->enabled();
const bool isConfig = serverGlobalParams.clusterRole == ClusterRole::ConfigServer;
repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
const bool isReplSet =
@@ -1311,7 +1312,7 @@ void appendOpTimeMetadata(OperationContext* txn,
if (isReplSet) {
// Attach our own last opTime.
repl::OpTime lastOpTimeFromClient =
- repl::ReplClientInfo::forClient(txn->getClient()).getLastOp();
+ repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
replCoord->prepareReplMetadata(request.getMetadata(), lastOpTimeFromClient, metadataBob);
// For commands from mongos, append some info to help getLastError(w) work.
// TODO: refactor out of here as part of SERVER-18236
@@ -1329,11 +1330,11 @@ void appendOpTimeMetadata(OperationContext* txn,
}
namespace {
-void execCommandHandler(OperationContext* const txn,
+void execCommandHandler(OperationContext* const opCtx,
Command* const command,
const rpc::RequestInterface& request,
rpc::ReplyBuilderInterface* const replyBuilder) {
- mongo::execCommandDatabase(txn, command, request, replyBuilder);
+ mongo::execCommandDatabase(opCtx, command, request, replyBuilder);
}
MONGO_INITIALIZER(InitializeCommandExecCommandHandler)(InitializerContext* const) {
@@ -1346,7 +1347,7 @@ MONGO_INITIALIZER(InitializeCommandExecCommandHandler)(InitializerContext* const
// use shardingState and the repl coordinator without changing our entire library
// structure.
// It will be moved back as part of SERVER-18236.
-bool Command::run(OperationContext* txn,
+bool Command::run(OperationContext* opCtx,
const rpc::RequestInterface& request,
rpc::ReplyBuilderInterface* replyBuilder) {
auto bytesToReserve = reserveBytesForReply();
@@ -1366,7 +1367,7 @@ bool Command::run(OperationContext* txn,
const std::string db = request.getDatabase().toString();
BSONObjBuilder inPlaceReplyBob(replyBuilder->getInPlaceReplyBuilder(bytesToReserve));
- auto readConcernArgsStatus = extractReadConcern(txn, cmd, supportsReadConcern());
+ auto readConcernArgsStatus = extractReadConcern(opCtx, cmd, supportsReadConcern());
if (!readConcernArgsStatus.isOK()) {
auto result = appendCommandStatus(inPlaceReplyBob, readConcernArgsStatus.getStatus());
@@ -1375,7 +1376,7 @@ bool Command::run(OperationContext* txn,
return result;
}
- Status rcStatus = waitForReadConcern(txn, readConcernArgsStatus.getValue());
+ Status rcStatus = waitForReadConcern(opCtx, readConcernArgsStatus.getValue());
if (!rcStatus.isOK()) {
if (rcStatus == ErrorCodes::ExceededTimeLimit) {
const int debugLevel =
@@ -1393,7 +1394,7 @@ bool Command::run(OperationContext* txn,
std::string errmsg;
bool result;
- auto startOperationTime = _getClientOperationTime(txn);
+ auto startOperationTime = _getClientOperationTime(opCtx);
if (!supportsWriteConcern(cmd)) {
if (commandSpecifiesWriteConcern(cmd)) {
auto result = appendCommandStatus(
@@ -1405,9 +1406,9 @@ bool Command::run(OperationContext* txn,
}
// TODO: remove queryOptions parameter from command's run method.
- result = run(txn, db, cmd, 0, errmsg, inPlaceReplyBob);
+ result = run(opCtx, db, cmd, 0, errmsg, inPlaceReplyBob);
} else {
- auto wcResult = extractWriteConcern(txn, cmd, db);
+ auto wcResult = extractWriteConcern(opCtx, cmd, db);
if (!wcResult.isOK()) {
auto result = appendCommandStatus(inPlaceReplyBob, wcResult.getStatus());
inPlaceReplyBob.doneFast();
@@ -1416,20 +1417,20 @@ bool Command::run(OperationContext* txn,
}
// Change the write concern while running the command.
- const auto oldWC = txn->getWriteConcern();
- ON_BLOCK_EXIT([&] { txn->setWriteConcern(oldWC); });
- txn->setWriteConcern(wcResult.getValue());
+ const auto oldWC = opCtx->getWriteConcern();
+ ON_BLOCK_EXIT([&] { opCtx->setWriteConcern(oldWC); });
+ opCtx->setWriteConcern(wcResult.getValue());
- result = run(txn, db, cmd, 0, errmsg, inPlaceReplyBob);
+ result = run(opCtx, db, cmd, 0, errmsg, inPlaceReplyBob);
// Nothing in run() should change the writeConcern.
- dassert(SimpleBSONObjComparator::kInstance.evaluate(txn->getWriteConcern().toBSON() ==
+ dassert(SimpleBSONObjComparator::kInstance.evaluate(opCtx->getWriteConcern().toBSON() ==
wcResult.getValue().toBSON()));
WriteConcernResult res;
auto waitForWCStatus =
- waitForWriteConcern(txn,
- repl::ReplClientInfo::forClient(txn->getClient()).getLastOp(),
+ waitForWriteConcern(opCtx,
+ repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(),
wcResult.getValue(),
&res);
appendCommandWCStatus(inPlaceReplyBob, waitForWCStatus, res);
@@ -1450,7 +1451,7 @@ bool Command::run(OperationContext* txn,
repl::ReadConcernLevel::kLinearizableReadConcern) &&
(request.getCommandName() != "getMore")) {
- auto linearizableReadStatus = waitForLinearizableReadConcern(txn);
+ auto linearizableReadStatus = waitForLinearizableReadConcern(opCtx);
if (!linearizableReadStatus.isOK()) {
inPlaceReplyBob.resetToEmpty();
@@ -1463,14 +1464,14 @@ bool Command::run(OperationContext* txn,
appendCommandStatus(inPlaceReplyBob, result, errmsg);
- auto finishOperationTime = _getClientOperationTime(txn);
+ auto finishOperationTime = _getClientOperationTime(opCtx);
auto operationTime = finishOperationTime;
invariant(finishOperationTime >= startOperationTime);
// this command did not write, so return current clusterTime.
if (finishOperationTime == startOperationTime) {
// TODO: SERVER-27786 to return the clusterTime of the read.
- operationTime = LogicalClock::get(txn)->getClusterTime().getTime();
+ operationTime = LogicalClock::get(opCtx)->getClusterTime().getTime();
}
appendOperationTime(inPlaceReplyBob, operationTime);
@@ -1478,7 +1479,7 @@ bool Command::run(OperationContext* txn,
inPlaceReplyBob.doneFast();
BSONObjBuilder metadataBob;
- appendOpTimeMetadata(txn, request, &metadataBob);
+ appendOpTimeMetadata(opCtx, request, &metadataBob);
replyBuilder->setMetadata(metadataBob.done());
return result;
@@ -1495,20 +1496,20 @@ bool Command::run(OperationContext* txn,
- context
then calls run()
*/
-void mongo::execCommandDatabase(OperationContext* txn,
+void mongo::execCommandDatabase(OperationContext* opCtx,
Command* command,
const rpc::RequestInterface& request,
rpc::ReplyBuilderInterface* replyBuilder) {
try {
{
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setCommand_inlock(command);
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setCommand_inlock(command);
}
// TODO: move this back to runCommands when mongos supports OperationContext
// see SERVER-18515 for details.
- uassertStatusOK(rpc::readRequestMetadata(txn, request.getMetadata()));
- rpc::TrackingMetadata::get(txn).initWithOperName(command->getName());
+ uassertStatusOK(rpc::readRequestMetadata(opCtx, request.getMetadata()));
+ rpc::TrackingMetadata::get(opCtx).initWithOperName(command->getName());
dassert(replyBuilder->getState() == rpc::ReplyBuilderInterface::State::kCommandReply);
@@ -1520,30 +1521,30 @@ void mongo::execCommandDatabase(OperationContext* txn,
request.getCommandArgs().getFields(neededFieldNames, &extractedFields);
if (Command::isHelpRequest(extractedFields[kHelpField])) {
- CurOp::get(txn)->ensureStarted();
+ CurOp::get(opCtx)->ensureStarted();
// We disable last-error for help requests due to SERVER-11492, because config servers
// use help requests to determine which commands are database writes, and so must be
// forwarded to all config servers.
- LastError::get(txn->getClient()).disable();
- Command::generateHelpResponse(txn, request, replyBuilder, *command);
+ LastError::get(opCtx->getClient()).disable();
+ Command::generateHelpResponse(opCtx, request, replyBuilder, *command);
return;
}
- ImpersonationSessionGuard guard(txn);
+ ImpersonationSessionGuard guard(opCtx);
uassertStatusOK(
- Command::checkAuthorization(command, txn, dbname, request.getCommandArgs()));
+ Command::checkAuthorization(command, opCtx, dbname, request.getCommandArgs()));
repl::ReplicationCoordinator* replCoord =
- repl::ReplicationCoordinator::get(txn->getClient()->getServiceContext());
- const bool iAmPrimary = replCoord->canAcceptWritesForDatabase_UNSAFE(txn, dbname);
+ repl::ReplicationCoordinator::get(opCtx->getClient()->getServiceContext());
+ const bool iAmPrimary = replCoord->canAcceptWritesForDatabase_UNSAFE(opCtx, dbname);
{
bool commandCanRunOnSecondary = command->slaveOk();
bool commandIsOverriddenToRunOnSecondary = command->slaveOverrideOk() &&
- rpc::ServerSelectionMetadata::get(txn).canRunOnSecondary();
+ rpc::ServerSelectionMetadata::get(opCtx).canRunOnSecondary();
- bool iAmStandalone = !txn->writesAreReplicated();
+ bool iAmStandalone = !opCtx->writesAreReplicated();
bool canRunHere = iAmPrimary || commandCanRunOnSecondary ||
commandIsOverriddenToRunOnSecondary || iAmStandalone;
@@ -1556,7 +1557,7 @@ void mongo::execCommandDatabase(OperationContext* txn,
if (!command->maintenanceOk() &&
replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet &&
- !replCoord->canAcceptWritesForDatabase_UNSAFE(txn, dbname) &&
+ !replCoord->canAcceptWritesForDatabase_UNSAFE(opCtx, dbname) &&
!replCoord->getMemberState().secondary()) {
uassert(ErrorCodes::NotMasterOrSecondary,
@@ -1596,43 +1597,43 @@ void mongo::execCommandDatabase(OperationContext* txn,
if (maxTimeMS > 0) {
uassert(40119,
"Illegal attempt to set operation deadline within DBDirectClient",
- !txn->getClient()->isInDirectClient());
- txn->setDeadlineAfterNowBy(Milliseconds{maxTimeMS});
+ !opCtx->getClient()->isInDirectClient());
+ opCtx->setDeadlineAfterNowBy(Milliseconds{maxTimeMS});
}
// Operations are only versioned against the primary. We also make sure not to redo shard
// version handling if this command was issued via the direct client.
- if (iAmPrimary && !txn->getClient()->isInDirectClient()) {
+ if (iAmPrimary && !opCtx->getClient()->isInDirectClient()) {
// Handle a shard version that may have been sent along with the command.
auto commandNS = NamespaceString(command->parseNs(dbname, request.getCommandArgs()));
- auto& oss = OperationShardingState::get(txn);
+ auto& oss = OperationShardingState::get(opCtx);
oss.initializeShardVersion(commandNS, extractedFields[kShardVersionFieldIdx]);
- auto shardingState = ShardingState::get(txn);
+ auto shardingState = ShardingState::get(opCtx);
if (oss.hasShardVersion()) {
uassertStatusOK(shardingState->canAcceptShardedCommands());
}
// Handle config optime information that may have been sent along with the command.
- uassertStatusOK(shardingState->updateConfigServerOpTimeFromMetadata(txn));
+ uassertStatusOK(shardingState->updateConfigServerOpTimeFromMetadata(opCtx));
}
// Can throw
- txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
+ opCtx->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
bool retval = false;
- CurOp::get(txn)->ensureStarted();
+ CurOp::get(opCtx)->ensureStarted();
command->_commandsExecuted.increment();
if (logger::globalLogDomain()->shouldLog(logger::LogComponent::kTracking,
logger::LogSeverity::Debug(1)) &&
- rpc::TrackingMetadata::get(txn).getParentOperId()) {
+ rpc::TrackingMetadata::get(opCtx).getParentOperId()) {
MONGO_LOG_COMPONENT(1, logger::LogComponent::kTracking)
- << rpc::TrackingMetadata::get(txn).toString();
- rpc::TrackingMetadata::get(txn).setIsLogged(true);
+ << rpc::TrackingMetadata::get(opCtx).toString();
+ rpc::TrackingMetadata::get(opCtx).setIsLogged(true);
}
- retval = command->run(txn, request, replyBuilder);
+ retval = command->run(opCtx, request, replyBuilder);
dassert(replyBuilder->getState() == rpc::ReplyBuilderInterface::State::kOutputDocs);
@@ -1645,15 +1646,15 @@ void mongo::execCommandDatabase(OperationContext* txn,
auto sce = dynamic_cast<const StaleConfigException*>(&e);
invariant(sce); // do not upcasts from DBException created by uassert variants.
- ShardingState::get(txn)->onStaleShardVersion(
- txn, NamespaceString(sce->getns()), sce->getVersionReceived());
+ ShardingState::get(opCtx)->onStaleShardVersion(
+ opCtx, NamespaceString(sce->getns()), sce->getVersionReceived());
}
BSONObjBuilder metadataBob;
- appendOpTimeMetadata(txn, request, &metadataBob);
+ appendOpTimeMetadata(opCtx, request, &metadataBob);
- auto operationTime = _getClientOperationTime(txn);
+ auto operationTime = _getClientOperationTime(opCtx);
Command::generateErrorResponse(
- txn, replyBuilder, e, request, command, metadataBob.done(), operationTime);
+ opCtx, replyBuilder, e, request, command, metadataBob.done(), operationTime);
}
}
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 6d68cd93f00..0da2752a28d 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -80,7 +80,7 @@ public:
out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -109,8 +109,8 @@ public:
// We lock the entire database in S-mode in order to ensure that the contents will not
// change for the snapshot.
- ScopedTransaction scopedXact(txn, MODE_IS);
- AutoGetDb autoDb(txn, ns, MODE_S);
+ ScopedTransaction scopedXact(opCtx, MODE_IS);
+ AutoGetDb autoDb(opCtx, ns, MODE_S);
Database* db = autoDb.getDb();
if (db) {
db->getDatabaseCatalogEntry()->getCollectionNamespaces(&colls);
@@ -152,7 +152,7 @@ public:
continue;
bool fromCache = false;
- string hash = _hashCollection(txn, db, fullCollectionName, &fromCache);
+ string hash = _hashCollection(opCtx, db, fullCollectionName, &fromCache);
bb.append(shortCollectionName, hash);
@@ -174,11 +174,11 @@ public:
return 1;
}
- void wipeCacheForCollection(OperationContext* txn, const NamespaceString& ns) {
+ void wipeCacheForCollection(OperationContext* opCtx, const NamespaceString& ns) {
if (!_isCachable(ns))
return;
- txn->recoveryUnit()->onCommit([this, txn, ns] {
+ opCtx->recoveryUnit()->onCommit([this, opCtx, ns] {
stdx::lock_guard<stdx::mutex> lk(_cachedHashedMutex);
if (ns.isCommand()) {
// The <dbName>.$cmd namespace can represent a command that
@@ -274,9 +274,9 @@ private:
} // namespace
-void logOpForDbHash(OperationContext* txn, const char* ns) {
+void logOpForDbHash(OperationContext* opCtx, const char* ns) {
NamespaceString nsString(ns);
- dbhashCmd.wipeCacheForCollection(txn, nsString);
+ dbhashCmd.wipeCacheForCollection(opCtx, nsString);
}
} // namespace mongo
diff --git a/src/mongo/db/commands/dbhash.h b/src/mongo/db/commands/dbhash.h
index 2afc3efb454..09db7e97e0e 100644
--- a/src/mongo/db/commands/dbhash.h
+++ b/src/mongo/db/commands/dbhash.h
@@ -32,6 +32,6 @@ namespace mongo {
class OperationContext;
-void logOpForDbHash(OperationContext* txn, const char* ns);
+void logOpForDbHash(OperationContext* opCtx, const char* ns);
} // namespace mongo
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 0f619356f96..febed50512f 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -111,7 +111,7 @@ public:
help << "{ distinct : 'collection name' , key : 'a.b' , query : {} }";
}
- virtual Status explain(OperationContext* txn,
+ virtual Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -119,8 +119,8 @@ public:
BSONObjBuilder* out) const {
const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
- const ExtensionsCallbackReal extensionsCallback(txn, &nss);
- auto parsedDistinct = ParsedDistinct::parse(txn, nss, cmdObj, extensionsCallback, true);
+ const ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
+ auto parsedDistinct = ParsedDistinct::parse(opCtx, nss, cmdObj, extensionsCallback, true);
if (!parsedDistinct.isOK()) {
return parsedDistinct.getStatus();
}
@@ -133,7 +133,7 @@ public:
"http://dochub.mongodb.org/core/3.4-feature-compatibility.");
}
- AutoGetCollectionOrViewForRead ctx(txn, nss);
+ AutoGetCollectionOrViewForRead ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
if (ctx.getView()) {
@@ -145,12 +145,12 @@ public:
}
std::string errmsg;
(void)Command::findCommand("aggregate")
- ->run(txn, dbname, viewAggregation.getValue(), 0, errmsg, *out);
+ ->run(opCtx, dbname, viewAggregation.getValue(), 0, errmsg, *out);
return Status::OK();
}
auto executor = getExecutorDistinct(
- txn, collection, nss.ns(), &parsedDistinct.getValue(), PlanExecutor::YIELD_AUTO);
+ opCtx, collection, nss.ns(), &parsedDistinct.getValue(), PlanExecutor::YIELD_AUTO);
if (!executor.isOK()) {
return executor.getStatus();
}
@@ -159,7 +159,7 @@ public:
return Status::OK();
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -167,8 +167,8 @@ public:
BSONObjBuilder& result) {
const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
- const ExtensionsCallbackReal extensionsCallback(txn, &nss);
- auto parsedDistinct = ParsedDistinct::parse(txn, nss, cmdObj, extensionsCallback, false);
+ const ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
+ auto parsedDistinct = ParsedDistinct::parse(opCtx, nss, cmdObj, extensionsCallback, false);
if (!parsedDistinct.isOK()) {
return appendCommandStatus(result, parsedDistinct.getStatus());
}
@@ -183,7 +183,7 @@ public:
"http://dochub.mongodb.org/core/3.4-feature-compatibility."));
}
- AutoGetCollectionOrViewForRead ctx(txn, nss);
+ AutoGetCollectionOrViewForRead ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
if (ctx.getView()) {
@@ -196,7 +196,7 @@ public:
BSONObjBuilder aggResult;
(void)Command::findCommand("aggregate")
- ->run(txn, dbname, viewAggregation.getValue(), options, errmsg, aggResult);
+ ->run(opCtx, dbname, viewAggregation.getValue(), options, errmsg, aggResult);
if (ResolvedView::isResolvedViewErrorResponse(aggResult.asTempObj())) {
result.appendElements(aggResult.obj());
@@ -212,14 +212,14 @@ public:
}
auto executor = getExecutorDistinct(
- txn, collection, nss.ns(), &parsedDistinct.getValue(), PlanExecutor::YIELD_AUTO);
+ opCtx, collection, nss.ns(), &parsedDistinct.getValue(), PlanExecutor::YIELD_AUTO);
if (!executor.isOK()) {
return appendCommandStatus(result, executor.getStatus());
}
{
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setPlanSummary_inlock(
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setPlanSummary_inlock(
Explain::getPlanSummary(executor.getValue().get()));
}
@@ -274,13 +274,13 @@ public:
}
- auto curOp = CurOp::get(txn);
+ auto curOp = CurOp::get(opCtx);
// Get summary information about the plan.
PlanSummaryStats stats;
Explain::getSummaryStats(*executor.getValue(), &stats);
if (collection) {
- collection->infoCache()->notifyOfQuery(txn, stats.indexesUsed);
+ collection->infoCache()->notifyOfQuery(opCtx, stats.indexesUsed);
}
curOp->debug().setPlanSummaryMetrics(stats);
diff --git a/src/mongo/db/commands/driverHelpers.cpp b/src/mongo/db/commands/driverHelpers.cpp
index 8b6163e678b..c25887bf0a2 100644
--- a/src/mongo/db/commands/driverHelpers.cpp
+++ b/src/mongo/db/commands/driverHelpers.cpp
@@ -73,7 +73,7 @@ public:
virtual void addRequiredPrivileges(const std::string& dbname,
const BSONObj& cmdObj,
std::vector<Privilege>* out) {} // No auth required
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index b3f2b73d21f..86082761cce 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -84,14 +84,14 @@ public:
}
CmdDropIndexes() : Command("dropIndexes", false, "deleteIndexes") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int,
string& errmsg,
BSONObjBuilder& result) {
const NamespaceString nss = parseNsCollectionRequired(dbname, jsobj);
- return appendCommandStatus(result, dropIndexes(txn, nss, jsobj, &result));
+ return appendCommandStatus(result, dropIndexes(opCtx, nss, jsobj, &result));
}
} cmdDropIndexes;
@@ -116,25 +116,25 @@ public:
}
CmdReIndex() : Command("reIndex") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int,
string& errmsg,
BSONObjBuilder& result) {
- DBDirectClient db(txn);
+ DBDirectClient db(opCtx);
const NamespaceString toReIndexNs = parseNsCollectionRequired(dbname, jsobj);
LOG(0) << "CMD: reIndex " << toReIndexNs;
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), dbname, MODE_X);
- OldClientContext ctx(txn, toReIndexNs.ns());
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dbXLock(opCtx->lockState(), dbname, MODE_X);
+ OldClientContext ctx(opCtx, toReIndexNs.ns());
Collection* collection = ctx.db()->getCollection(toReIndexNs.ns());
if (!collection) {
- if (ctx.db()->getViewCatalog()->lookup(txn, toReIndexNs.ns()))
+ if (ctx.db()->getViewCatalog()->lookup(opCtx, toReIndexNs.ns()))
return appendCommandStatus(
result, {ErrorCodes::CommandNotSupportedOnView, "can't re-index a view"});
else
@@ -152,12 +152,12 @@ public:
vector<BSONObj> all;
{
vector<string> indexNames;
- collection->getCatalogEntry()->getAllIndexes(txn, &indexNames);
+ collection->getCatalogEntry()->getAllIndexes(opCtx, &indexNames);
all.reserve(indexNames.size());
for (size_t i = 0; i < indexNames.size(); i++) {
const string& name = indexNames[i];
- BSONObj spec = collection->getCatalogEntry()->getIndexSpec(txn, name);
+ BSONObj spec = collection->getCatalogEntry()->getIndexSpec(opCtx, name);
{
BSONObjBuilder bob;
@@ -192,8 +192,8 @@ public:
result.appendNumber("nIndexesWas", all.size());
{
- WriteUnitOfWork wunit(txn);
- Status s = collection->getIndexCatalog()->dropAllIndexes(txn, true);
+ WriteUnitOfWork wunit(opCtx);
+ Status s = collection->getIndexCatalog()->dropAllIndexes(opCtx, true);
if (!s.isOK()) {
errmsg = "dropIndexes failed";
return appendCommandStatus(result, s);
@@ -201,7 +201,7 @@ public:
wunit.commit();
}
- MultiIndexBlock indexer(txn, collection);
+ MultiIndexBlock indexer(opCtx, collection);
// do not want interruption as that will leave us without indexes.
auto indexInfoObjs = indexer.init(all);
@@ -215,7 +215,7 @@ public:
}
{
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
indexer.commit();
wunit.commit();
}
@@ -224,8 +224,8 @@ public:
// This was also done when dropAllIndexes() committed, but we need to ensure that no one
// tries to read in the intermediate state where all indexes are newer than the current
// snapshot so are unable to be used.
- auto replCoord = repl::ReplicationCoordinator::get(txn);
- auto snapshotName = replCoord->reserveSnapshotName(txn);
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
+ auto snapshotName = replCoord->reserveSnapshotName(opCtx);
replCoord->forceSnapshotCreation(); // Ensures a newer snapshot gets created even if idle.
collection->setMinimumVisibleSnapshot(snapshotName);
diff --git a/src/mongo/db/commands/eval.cpp b/src/mongo/db/commands/eval.cpp
index f9345d4247c..20945eb4828 100644
--- a/src/mongo/db/commands/eval.cpp
+++ b/src/mongo/db/commands/eval.cpp
@@ -58,7 +58,7 @@ namespace {
const int edebug = 0;
-bool dbEval(OperationContext* txn,
+bool dbEval(OperationContext* opCtx,
const string& dbName,
const BSONObj& cmd,
BSONObjBuilder& result,
@@ -92,7 +92,7 @@ bool dbEval(OperationContext* txn,
}
unique_ptr<Scope> s(getGlobalScriptEngine()->newScope());
- s->registerOperation(txn);
+ s->registerOperation(opCtx);
ScriptingFunction f = s->createFunction(code);
if (f == 0) {
@@ -100,7 +100,7 @@ bool dbEval(OperationContext* txn,
return false;
}
- s->localConnectForDbEval(txn, dbName.c_str());
+ s->localConnectForDbEval(opCtx, dbName.c_str());
if (e.type() == CodeWScope) {
s->init(e.codeWScopeScopeDataUnsafe());
@@ -171,22 +171,22 @@ public:
CmdEval() : Command("eval", false, "$eval") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
if (cmdObj["nolock"].trueValue()) {
- return dbEval(txn, dbname, cmdObj, result, errmsg);
+ return dbEval(opCtx, dbname, cmdObj, result, errmsg);
}
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
- OldClientContext ctx(txn, dbname, false /* no shard version checking */);
+ OldClientContext ctx(opCtx, dbname, false /* no shard version checking */);
- return dbEval(txn, dbname, cmdObj, result, errmsg);
+ return dbEval(opCtx, dbname, cmdObj, result, errmsg);
}
} cmdeval;
diff --git a/src/mongo/db/commands/explain_cmd.cpp b/src/mongo/db/commands/explain_cmd.cpp
index 678fd7effa2..7c72c26977f 100644
--- a/src/mongo/db/commands/explain_cmd.cpp
+++ b/src/mongo/db/commands/explain_cmd.cpp
@@ -89,7 +89,7 @@ public:
* the command that you are explaining. The auth check is performed recursively
* on the nested command.
*/
- virtual Status checkAuthForOperation(OperationContext* txn,
+ virtual Status checkAuthForOperation(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj) {
if (Object != cmdObj.firstElement().type()) {
@@ -105,10 +105,10 @@ public:
return Status(ErrorCodes::CommandNotFound, ss);
}
- return commToExplain->checkAuthForOperation(txn, dbname, explainObj);
+ return commToExplain->checkAuthForOperation(opCtx, dbname, explainObj);
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -135,12 +135,12 @@ public:
// copied from Command::execCommand and should be abstracted. Until then, make
// sure to keep it up to date.
repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
- bool iAmPrimary = replCoord->canAcceptWritesForDatabase_UNSAFE(txn, dbname);
+ bool iAmPrimary = replCoord->canAcceptWritesForDatabase_UNSAFE(opCtx, dbname);
bool commandCanRunOnSecondary = commToExplain->slaveOk();
bool commandIsOverriddenToRunOnSecondary = commToExplain->slaveOverrideOk() &&
- rpc::ServerSelectionMetadata::get(txn).canRunOnSecondary();
- bool iAmStandalone = !txn->writesAreReplicated();
+ rpc::ServerSelectionMetadata::get(opCtx).canRunOnSecondary();
+ bool iAmStandalone = !opCtx->writesAreReplicated();
const bool canRunHere = iAmPrimary || commandCanRunOnSecondary ||
commandIsOverriddenToRunOnSecondary || iAmStandalone;
@@ -154,8 +154,12 @@ public:
}
// Actually call the nested command's explain(...) method.
- Status explainStatus = commToExplain->explain(
- txn, dbname, explainObj, verbosity, rpc::ServerSelectionMetadata::get(txn), &result);
+ Status explainStatus = commToExplain->explain(opCtx,
+ dbname,
+ explainObj,
+ verbosity,
+ rpc::ServerSelectionMetadata::get(opCtx),
+ &result);
if (!explainStatus.isOK()) {
return appendCommandStatus(result, explainStatus);
}
diff --git a/src/mongo/db/commands/fail_point_cmd.cpp b/src/mongo/db/commands/fail_point_cmd.cpp
index a298c267647..9e6795e9d78 100644
--- a/src/mongo/db/commands/fail_point_cmd.cpp
+++ b/src/mongo/db/commands/fail_point_cmd.cpp
@@ -90,7 +90,7 @@ public:
h << "modifies the settings of a fail point";
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
diff --git a/src/mongo/db/commands/feature_compatibility_version.cpp b/src/mongo/db/commands/feature_compatibility_version.cpp
index 29d3a96513e..d97f5cf0c54 100644
--- a/src/mongo/db/commands/feature_compatibility_version.cpp
+++ b/src/mongo/db/commands/feature_compatibility_version.cpp
@@ -193,14 +193,14 @@ StatusWith<ServerGlobalParams::FeatureCompatibility::Version> FeatureCompatibili
return version;
}
-void FeatureCompatibilityVersion::set(OperationContext* txn, StringData version) {
+void FeatureCompatibilityVersion::set(OperationContext* opCtx, StringData version) {
uassert(40284,
"featureCompatibilityVersion must be '3.4' or '3.2'. See "
"http://dochub.mongodb.org/core/3.4-feature-compatibility.",
version == FeatureCompatibilityVersionCommandParser::kVersion34 ||
version == FeatureCompatibilityVersionCommandParser::kVersion32);
- DBDirectClient client(txn);
+ DBDirectClient client(opCtx);
NamespaceString nss(FeatureCompatibilityVersion::kCollection);
if (version == FeatureCompatibilityVersionCommandParser::kVersion34) {
@@ -211,27 +211,28 @@ void FeatureCompatibilityVersion::set(OperationContext* txn, StringData version)
std::vector<BSONObj> indexSpecs{k32IncompatibleIndexSpec};
{
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetOrCreateDb autoDB(txn, nss.db(), MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ AutoGetOrCreateDb autoDB(opCtx, nss.db(), MODE_X);
uassert(ErrorCodes::NotMaster,
str::stream() << "Cannot set featureCompatibilityVersion to '" << version
<< "'. Not primary while attempting to create index on: "
<< nss.ns(),
- repl::ReplicationCoordinator::get(txn->getServiceContext())
- ->canAcceptWritesFor(txn, nss));
+ repl::ReplicationCoordinator::get(opCtx->getServiceContext())
+ ->canAcceptWritesFor(opCtx, nss));
IndexBuilder builder(k32IncompatibleIndexSpec, false);
- auto status = builder.buildInForeground(txn, autoDB.getDb());
+ auto status = builder.buildInForeground(opCtx, autoDB.getDb());
uassertStatusOK(status);
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
getGlobalServiceContext()->getOpObserver()->onCreateIndex(
- txn, autoDB.getDb()->getSystemIndexesName(), k32IncompatibleIndexSpec, false);
+ opCtx, autoDB.getDb()->getSystemIndexesName(), k32IncompatibleIndexSpec, false);
wuow.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "FeatureCompatibilityVersion::set", nss.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
+ opCtx, "FeatureCompatibilityVersion::set", nss.ns());
}
// We then update the featureCompatibilityVersion document stored in the
@@ -279,7 +280,7 @@ void FeatureCompatibilityVersion::set(OperationContext* txn, StringData version)
}
}
-void FeatureCompatibilityVersion::setIfCleanStartup(OperationContext* txn,
+void FeatureCompatibilityVersion::setIfCleanStartup(OperationContext* opCtx,
repl::StorageInterface* storageInterface) {
if (serverGlobalParams.clusterRole != ClusterRole::ShardServer) {
std::vector<std::string> dbNames;
@@ -292,7 +293,7 @@ void FeatureCompatibilityVersion::setIfCleanStartup(OperationContext* txn,
}
}
- UnreplicatedWritesBlock unreplicatedWritesBlock(txn);
+ UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx);
NamespaceString nss(FeatureCompatibilityVersion::kCollection);
// We build a v=2 index on the "admin.system.version" collection as part of setting the
@@ -302,11 +303,11 @@ void FeatureCompatibilityVersion::setIfCleanStartup(OperationContext* txn,
std::vector<BSONObj> indexSpecs{k32IncompatibleIndexSpec};
{
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetOrCreateDb autoDB(txn, nss.db(), MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ AutoGetOrCreateDb autoDB(opCtx, nss.db(), MODE_X);
IndexBuilder builder(k32IncompatibleIndexSpec, false);
- auto status = builder.buildInForeground(txn, autoDB.getDb());
+ auto status = builder.buildInForeground(opCtx, autoDB.getDb());
uassertStatusOK(status);
}
@@ -317,7 +318,7 @@ void FeatureCompatibilityVersion::setIfCleanStartup(OperationContext* txn,
// document when starting up, then on a subsequent start-up we'd no longer consider the data
// files "clean" and would instead be in featureCompatibilityVersion=3.2.
uassertStatusOK(storageInterface->insertDocument(
- txn,
+ opCtx,
nss,
BSON("_id" << FeatureCompatibilityVersion::kParameterName
<< FeatureCompatibilityVersion::kVersionField
@@ -372,7 +373,7 @@ public:
false // allowedToChangeAtRuntime
) {}
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
+ virtual void append(OperationContext* opCtx, BSONObjBuilder& b, const std::string& name) {
b.append(name,
getFeatureCompatibilityVersionString(
serverGlobalParams.featureCompatibility.version.load()));
diff --git a/src/mongo/db/commands/feature_compatibility_version.h b/src/mongo/db/commands/feature_compatibility_version.h
index 4bcb4b56e55..44029c72e21 100644
--- a/src/mongo/db/commands/feature_compatibility_version.h
+++ b/src/mongo/db/commands/feature_compatibility_version.h
@@ -66,13 +66,14 @@ public:
* available.
* 'version' should be '3.4' or '3.2'.
*/
- static void set(OperationContext* txn, StringData version);
+ static void set(OperationContext* opCtx, StringData version);
/**
* If there are no non-local databases and we are not running with --shardsvr, set
* featureCompatibilityVersion to 3.4.
*/
- static void setIfCleanStartup(OperationContext* txn, repl::StorageInterface* storageInterface);
+ static void setIfCleanStartup(OperationContext* opCtx,
+ repl::StorageInterface* storageInterface);
/**
* Examines a document inserted or updated in admin.system.version. If it is the
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index d0a260164d4..3aa7eb2018c 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -105,7 +105,7 @@ const DeleteStats* getDeleteStats(const PlanExecutor* exec) {
*
* If the operation failed, then an error Status is returned.
*/
-StatusWith<boost::optional<BSONObj>> advanceExecutor(OperationContext* txn,
+StatusWith<boost::optional<BSONObj>> advanceExecutor(OperationContext* opCtx,
PlanExecutor* exec,
bool isRemove) {
BSONObj value;
@@ -191,8 +191,8 @@ void appendCommandResponse(PlanExecutor* exec,
}
}
-Status checkCanAcceptWritesForDatabase(OperationContext* txn, const NamespaceString& nsString) {
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, nsString)) {
+Status checkCanAcceptWritesForDatabase(OperationContext* opCtx, const NamespaceString& nsString) {
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, nsString)) {
return Status(ErrorCodes::NotMaster,
str::stream()
<< "Not primary while running findAndModify command on collection "
@@ -201,12 +201,12 @@ Status checkCanAcceptWritesForDatabase(OperationContext* txn, const NamespaceStr
return Status::OK();
}
-void recordStatsForTopCommand(OperationContext* txn) {
- auto curOp = CurOp::get(txn);
+void recordStatsForTopCommand(OperationContext* opCtx) {
+ auto curOp = CurOp::get(opCtx);
const int writeLocked = 1;
- Top::get(txn->getClient()->getServiceContext())
- .record(txn,
+ Top::get(opCtx->getClient()->getServiceContext())
+ .record(opCtx,
curOp->getNS(),
curOp->getLogicalOp(),
writeLocked,
@@ -249,7 +249,7 @@ public:
return ReadWriteType::kWrite;
}
- Status explain(OperationContext* txn,
+ Status explain(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -269,14 +269,14 @@ public:
const FindAndModifyRequest& args = parseStatus.getValue();
const NamespaceString& nsString = args.getNamespaceString();
- OpDebug* opDebug = &CurOp::get(txn)->debug();
+ OpDebug* opDebug = &CurOp::get(opCtx)->debug();
if (args.isRemove()) {
DeleteRequest request(nsString);
const bool isExplain = true;
makeDeleteRequest(args, isExplain, &request);
- ParsedDelete parsedDelete(txn, &request);
+ ParsedDelete parsedDelete(opCtx, &request);
Status parsedDeleteStatus = parsedDelete.parseRequest();
if (!parsedDeleteStatus.isOK()) {
return parsedDeleteStatus;
@@ -284,18 +284,18 @@ public:
// Explain calls of the findAndModify command are read-only, but we take write
// locks so that the timing information is more accurate.
- AutoGetCollection autoColl(txn, nsString, MODE_IX);
+ AutoGetCollection autoColl(opCtx, nsString, MODE_IX);
if (!autoColl.getDb()) {
return {ErrorCodes::NamespaceNotFound,
str::stream() << "database " << dbName << " does not exist."};
}
- auto css = CollectionShardingState::get(txn, nsString);
- css->checkShardVersionOrThrow(txn);
+ auto css = CollectionShardingState::get(opCtx, nsString);
+ css->checkShardVersionOrThrow(opCtx);
Collection* const collection = autoColl.getCollection();
auto statusWithPlanExecutor =
- getExecutorDelete(txn, opDebug, collection, &parsedDelete);
+ getExecutorDelete(opCtx, opDebug, collection, &parsedDelete);
if (!statusWithPlanExecutor.isOK()) {
return statusWithPlanExecutor.getStatus();
}
@@ -307,7 +307,7 @@ public:
const bool isExplain = true;
makeUpdateRequest(args, isExplain, &updateLifecycle, &request);
- ParsedUpdate parsedUpdate(txn, &request);
+ ParsedUpdate parsedUpdate(opCtx, &request);
Status parsedUpdateStatus = parsedUpdate.parseRequest();
if (!parsedUpdateStatus.isOK()) {
return parsedUpdateStatus;
@@ -315,18 +315,18 @@ public:
// Explain calls of the findAndModify command are read-only, but we take write
// locks so that the timing information is more accurate.
- AutoGetCollection autoColl(txn, nsString, MODE_IX);
+ AutoGetCollection autoColl(opCtx, nsString, MODE_IX);
if (!autoColl.getDb()) {
return {ErrorCodes::NamespaceNotFound,
str::stream() << "database " << dbName << " does not exist."};
}
- auto css = CollectionShardingState::get(txn, nsString);
- css->checkShardVersionOrThrow(txn);
+ auto css = CollectionShardingState::get(opCtx, nsString);
+ css->checkShardVersionOrThrow(opCtx);
Collection* collection = autoColl.getCollection();
auto statusWithPlanExecutor =
- getExecutorUpdate(txn, opDebug, collection, &parsedUpdate);
+ getExecutorUpdate(opCtx, opDebug, collection, &parsedUpdate);
if (!statusWithPlanExecutor.isOK()) {
return statusWithPlanExecutor.getStatus();
}
@@ -337,14 +337,14 @@ public:
return Status::OK();
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbName,
BSONObj& cmdObj,
int options,
std::string& errmsg,
BSONObjBuilder& result) override {
// findAndModify command is not replicated directly.
- invariant(txn->writesAreReplicated());
+ invariant(opCtx->writesAreReplicated());
const NamespaceString fullNs = parseNsCollectionRequired(dbName, cmdObj);
Status allowedWriteStatus = userAllowedWriteNS(fullNs.ns());
if (!allowedWriteStatus.isOK()) {
@@ -362,21 +362,21 @@ public:
boost::optional<DisableDocumentValidation> maybeDisableValidation;
if (shouldBypassDocumentValidationForCommand(cmdObj))
- maybeDisableValidation.emplace(txn);
+ maybeDisableValidation.emplace(opCtx);
- auto client = txn->getClient();
+ auto client = opCtx->getClient();
auto lastOpAtOperationStart = repl::ReplClientInfo::forClient(client).getLastOp();
ScopeGuard lastOpSetterGuard =
MakeObjGuard(repl::ReplClientInfo::forClient(client),
&repl::ReplClientInfo::setLastOpToSystemLastOpTime,
- txn);
+ opCtx);
// If this is the local database, don't set last op.
if (dbName == "local") {
lastOpSetterGuard.Dismiss();
}
- auto curOp = CurOp::get(txn);
+ auto curOp = CurOp::get(opCtx);
OpDebug* opDebug = &curOp->debug();
// Although usually the PlanExecutor handles WCE internally, it will throw WCEs when it is
@@ -388,38 +388,38 @@ public:
const bool isExplain = false;
makeDeleteRequest(args, isExplain, &request);
- ParsedDelete parsedDelete(txn, &request);
+ ParsedDelete parsedDelete(opCtx, &request);
Status parsedDeleteStatus = parsedDelete.parseRequest();
if (!parsedDeleteStatus.isOK()) {
return appendCommandStatus(result, parsedDeleteStatus);
}
- AutoGetOrCreateDb autoDb(txn, dbName, MODE_IX);
- Lock::CollectionLock collLock(txn->lockState(), nsString.ns(), MODE_IX);
+ AutoGetOrCreateDb autoDb(opCtx, dbName, MODE_IX);
+ Lock::CollectionLock collLock(opCtx->lockState(), nsString.ns(), MODE_IX);
// Attach the namespace and database profiling level to the current op.
{
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->enter_inlock(nsString.ns().c_str(),
- autoDb.getDb()->getProfilingLevel());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->enter_inlock(nsString.ns().c_str(),
+ autoDb.getDb()->getProfilingLevel());
}
- auto css = CollectionShardingState::get(txn, nsString);
- css->checkShardVersionOrThrow(txn);
+ auto css = CollectionShardingState::get(opCtx, nsString);
+ css->checkShardVersionOrThrow(opCtx);
- Status isPrimary = checkCanAcceptWritesForDatabase(txn, nsString);
+ Status isPrimary = checkCanAcceptWritesForDatabase(opCtx, nsString);
if (!isPrimary.isOK()) {
return appendCommandStatus(result, isPrimary);
}
Collection* const collection = autoDb.getDb()->getCollection(nsString.ns());
- if (!collection && autoDb.getDb()->getViewCatalog()->lookup(txn, nsString.ns())) {
+ if (!collection && autoDb.getDb()->getViewCatalog()->lookup(opCtx, nsString.ns())) {
return appendCommandStatus(result,
{ErrorCodes::CommandNotSupportedOnView,
"findAndModify not supported on a view"});
}
auto statusWithPlanExecutor =
- getExecutorDelete(txn, opDebug, collection, &parsedDelete);
+ getExecutorDelete(opCtx, opDebug, collection, &parsedDelete);
if (!statusWithPlanExecutor.isOK()) {
return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
@@ -427,12 +427,12 @@ public:
std::move(statusWithPlanExecutor.getValue());
{
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
}
StatusWith<boost::optional<BSONObj>> advanceStatus =
- advanceExecutor(txn, exec.get(), args.isRemove());
+ advanceExecutor(opCtx, exec.get(), args.isRemove());
if (!advanceStatus.isOK()) {
return appendCommandStatus(result, advanceStatus.getStatus());
}
@@ -443,7 +443,7 @@ public:
PlanSummaryStats summaryStats;
Explain::getSummaryStats(*exec, &summaryStats);
if (collection) {
- collection->infoCache()->notifyOfQuery(txn, summaryStats.indexesUsed);
+ collection->infoCache()->notifyOfQuery(opCtx, summaryStats.indexesUsed);
}
opDebug->setPlanSummaryMetrics(summaryStats);
@@ -455,7 +455,7 @@ public:
Explain::getWinningPlanStats(exec.get(), &execStatsBob);
curOp->debug().execStats = execStatsBob.obj();
}
- recordStatsForTopCommand(txn);
+ recordStatsForTopCommand(opCtx);
boost::optional<BSONObj> value = advanceStatus.getValue();
appendCommandResponse(exec.get(), args.isRemove(), value, result);
@@ -465,32 +465,32 @@ public:
const bool isExplain = false;
makeUpdateRequest(args, isExplain, &updateLifecycle, &request);
- ParsedUpdate parsedUpdate(txn, &request);
+ ParsedUpdate parsedUpdate(opCtx, &request);
Status parsedUpdateStatus = parsedUpdate.parseRequest();
if (!parsedUpdateStatus.isOK()) {
return appendCommandStatus(result, parsedUpdateStatus);
}
- AutoGetOrCreateDb autoDb(txn, dbName, MODE_IX);
- Lock::CollectionLock collLock(txn->lockState(), nsString.ns(), MODE_IX);
+ AutoGetOrCreateDb autoDb(opCtx, dbName, MODE_IX);
+ Lock::CollectionLock collLock(opCtx->lockState(), nsString.ns(), MODE_IX);
// Attach the namespace and database profiling level to the current op.
{
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->enter_inlock(nsString.ns().c_str(),
- autoDb.getDb()->getProfilingLevel());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->enter_inlock(nsString.ns().c_str(),
+ autoDb.getDb()->getProfilingLevel());
}
- auto css = CollectionShardingState::get(txn, nsString);
- css->checkShardVersionOrThrow(txn);
+ auto css = CollectionShardingState::get(opCtx, nsString);
+ css->checkShardVersionOrThrow(opCtx);
- Status isPrimary = checkCanAcceptWritesForDatabase(txn, nsString);
+ Status isPrimary = checkCanAcceptWritesForDatabase(opCtx, nsString);
if (!isPrimary.isOK()) {
return appendCommandStatus(result, isPrimary);
}
Collection* collection = autoDb.getDb()->getCollection(nsString.ns());
- if (!collection && autoDb.getDb()->getViewCatalog()->lookup(txn, nsString.ns())) {
+ if (!collection && autoDb.getDb()->getViewCatalog()->lookup(opCtx, nsString.ns())) {
return appendCommandStatus(result,
{ErrorCodes::CommandNotSupportedOnView,
"findAndModify not supported on a view"});
@@ -503,7 +503,7 @@ public:
// in exclusive mode in order to create the collection.
collLock.relockAsDatabaseExclusive(autoDb.lock());
collection = autoDb.getDb()->getCollection(nsString.ns());
- Status isPrimaryAfterRelock = checkCanAcceptWritesForDatabase(txn, nsString);
+ Status isPrimaryAfterRelock = checkCanAcceptWritesForDatabase(opCtx, nsString);
if (!isPrimaryAfterRelock.isOK()) {
return appendCommandStatus(result, isPrimaryAfterRelock);
}
@@ -511,9 +511,9 @@ public:
if (collection) {
// Someone else beat us to creating the collection, do nothing.
} else {
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
Status createCollStatus =
- userCreateNS(txn, autoDb.getDb(), nsString.ns(), BSONObj());
+ userCreateNS(opCtx, autoDb.getDb(), nsString.ns(), BSONObj());
if (!createCollStatus.isOK()) {
return appendCommandStatus(result, createCollStatus);
}
@@ -525,7 +525,7 @@ public:
}
auto statusWithPlanExecutor =
- getExecutorUpdate(txn, opDebug, collection, &parsedUpdate);
+ getExecutorUpdate(opCtx, opDebug, collection, &parsedUpdate);
if (!statusWithPlanExecutor.isOK()) {
return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
@@ -533,12 +533,12 @@ public:
std::move(statusWithPlanExecutor.getValue());
{
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
}
StatusWith<boost::optional<BSONObj>> advanceStatus =
- advanceExecutor(txn, exec.get(), args.isRemove());
+ advanceExecutor(opCtx, exec.get(), args.isRemove());
if (!advanceStatus.isOK()) {
return appendCommandStatus(result, advanceStatus.getStatus());
}
@@ -549,7 +549,7 @@ public:
PlanSummaryStats summaryStats;
Explain::getSummaryStats(*exec, &summaryStats);
if (collection) {
- collection->infoCache()->notifyOfQuery(txn, summaryStats.indexesUsed);
+ collection->infoCache()->notifyOfQuery(opCtx, summaryStats.indexesUsed);
}
UpdateStage::recordUpdateStatsInOpDebug(getUpdateStats(exec.get()), opDebug);
opDebug->setPlanSummaryMetrics(summaryStats);
@@ -559,13 +559,13 @@ public:
Explain::getWinningPlanStats(exec.get(), &execStatsBob);
curOp->debug().execStats = execStatsBob.obj();
}
- recordStatsForTopCommand(txn);
+ recordStatsForTopCommand(opCtx);
boost::optional<BSONObj> value = advanceStatus.getValue();
appendCommandResponse(exec.get(), args.isRemove(), value, result);
}
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "findAndModify", nsString.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "findAndModify", nsString.ns());
if (repl::ReplClientInfo::forClient(client).getLastOp() != lastOpAtOperationStart) {
// If this operation has already generated a new lastOp, don't bother setting it here.
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index d2b5075e283..c3d8e88b227 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -129,7 +129,7 @@ public:
return AuthorizationSession::get(client)->checkAuthForFind(nss, hasTerm);
}
- Status explain(OperationContext* txn,
+ Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -158,9 +158,9 @@ public:
// Finish the parsing step by using the QueryRequest to create a CanonicalQuery.
- ExtensionsCallbackReal extensionsCallback(txn, &nss);
+ ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
auto statusWithCQ =
- CanonicalQuery::canonicalize(txn, std::move(qrStatus.getValue()), extensionsCallback);
+ CanonicalQuery::canonicalize(opCtx, std::move(qrStatus.getValue()), extensionsCallback);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
@@ -168,7 +168,7 @@ public:
// Acquire locks. If the namespace is a view, we release our locks and convert the query
// request into an aggregation command.
- AutoGetCollectionOrViewForRead ctx(txn, nss);
+ AutoGetCollectionOrViewForRead ctx(opCtx, nss);
if (ctx.getView()) {
// Relinquish locks. The aggregation command will re-acquire them.
ctx.releaseLocksForView();
@@ -184,7 +184,7 @@ public:
std::string errmsg;
try {
- agg->run(txn, dbname, viewAggregationCommand.getValue(), 0, errmsg, *out);
+ agg->run(opCtx, dbname, viewAggregationCommand.getValue(), 0, errmsg, *out);
} catch (DBException& error) {
if (error.getCode() == ErrorCodes::InvalidPipelineOperator) {
return {ErrorCodes::InvalidPipelineOperator,
@@ -201,7 +201,7 @@ public:
// We have a parsed query. Time to get the execution plan for it.
auto statusWithPlanExecutor =
- getExecutorFind(txn, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO);
+ getExecutorFind(opCtx, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO);
if (!statusWithPlanExecutor.isOK()) {
return statusWithPlanExecutor.getStatus();
}
@@ -221,7 +221,7 @@ public:
* --Save state for getMore, transferring ownership of the executor to a ClientCursor.
* --Generate response to send to the client.
*/
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -237,7 +237,7 @@ public:
// Although it is a command, a find command gets counted as a query.
globalOpCounters.gotQuery();
- if (txn->getClient()->isInDirectClient()) {
+ if (opCtx->getClient()->isInDirectClient()) {
return appendCommandStatus(
result,
Status(ErrorCodes::IllegalOperation, "Cannot run find command from eval()"));
@@ -264,8 +264,8 @@ public:
// Validate term before acquiring locks, if provided.
if (auto term = qr->getReplicationTerm()) {
- auto replCoord = repl::ReplicationCoordinator::get(txn);
- Status status = replCoord->updateTerm(txn, *term);
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
+ Status status = replCoord->updateTerm(opCtx, *term);
// Note: updateTerm returns ok if term stayed the same.
if (!status.isOK()) {
return appendCommandStatus(result, status);
@@ -279,11 +279,11 @@ public:
// find command parameters, so these fields are redundant.
const int ntoreturn = -1;
const int ntoskip = -1;
- beginQueryOp(txn, nss, cmdObj, ntoreturn, ntoskip);
+ beginQueryOp(opCtx, nss, cmdObj, ntoreturn, ntoskip);
// Finish the parsing step by using the QueryRequest to create a CanonicalQuery.
- ExtensionsCallbackReal extensionsCallback(txn, &nss);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
+ ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx, std::move(qr), extensionsCallback);
if (!statusWithCQ.isOK()) {
return appendCommandStatus(result, statusWithCQ.getStatus());
}
@@ -291,7 +291,7 @@ public:
// Acquire locks. If the query is on a view, we release our locks and convert the query
// request into an aggregation command.
- AutoGetCollectionOrViewForRead ctx(txn, nss);
+ AutoGetCollectionOrViewForRead ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
if (ctx.getView()) {
// Relinquish locks. The aggregation command will re-acquire them.
@@ -306,7 +306,7 @@ public:
Command* agg = Command::findCommand("aggregate");
try {
- agg->run(txn, dbname, viewAggregationCommand.getValue(), options, errmsg, result);
+ agg->run(opCtx, dbname, viewAggregationCommand.getValue(), options, errmsg, result);
} catch (DBException& error) {
if (error.getCode() == ErrorCodes::InvalidPipelineOperator) {
return appendCommandStatus(
@@ -321,7 +321,7 @@ public:
// Get the execution plan for the query.
auto statusWithPlanExecutor =
- getExecutorFind(txn, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO);
+ getExecutorFind(opCtx, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO);
if (!statusWithPlanExecutor.isOK()) {
return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
@@ -329,8 +329,8 @@ public:
std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
{
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
}
if (!collection) {
@@ -338,7 +338,7 @@ public:
// there is no ClientCursor id, and then return.
const long long numResults = 0;
const CursorId cursorId = 0;
- endQueryOp(txn, collection, *exec, numResults, cursorId);
+ endQueryOp(opCtx, collection, *exec, numResults, cursorId);
appendCursorResponseObject(cursorId, nss.ns(), BSONArray(), &result);
return true;
}
@@ -378,12 +378,12 @@ public:
// Before saving the cursor, ensure that whatever plan we established happened with the
// expected collection version
- auto css = CollectionShardingState::get(txn, nss);
- css->checkShardVersionOrThrow(txn);
+ auto css = CollectionShardingState::get(opCtx, nss);
+ css->checkShardVersionOrThrow(opCtx);
// Set up the cursor for getMore.
CursorId cursorId = 0;
- if (shouldSaveCursor(txn, collection, state, exec.get())) {
+ if (shouldSaveCursor(opCtx, collection, state, exec.get())) {
// Register the execution plan inside a ClientCursor. Ownership of the PlanExecutor is
// transferred to the ClientCursor.
//
@@ -395,7 +395,7 @@ public:
ClientCursorPin pinnedCursor = collection->getCursorManager()->registerCursor(
{exec.release(),
nss.ns(),
- txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(),
+ opCtx->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(),
originalQR.getOptions(),
cmdObj.getOwned()});
cursorId = pinnedCursor.getCursor()->cursorid();
@@ -407,13 +407,13 @@ public:
cursorExec->saveState();
cursorExec->detachFromOperationContext();
- pinnedCursor.getCursor()->setLeftoverMaxTimeMicros(txn->getRemainingMaxTimeMicros());
+ pinnedCursor.getCursor()->setLeftoverMaxTimeMicros(opCtx->getRemainingMaxTimeMicros());
pinnedCursor.getCursor()->setPos(numResults);
// Fill out curop based on the results.
- endQueryOp(txn, collection, *cursorExec, numResults, cursorId);
+ endQueryOp(opCtx, collection, *cursorExec, numResults, cursorId);
} else {
- endQueryOp(txn, collection, *exec, numResults, cursorId);
+ endQueryOp(opCtx, collection, *exec, numResults, cursorId);
}
// Generate the response object to send to the client.
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index 01d922cec02..dfe417b6c06 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -117,13 +117,13 @@ public:
actions.addAction(ActionType::fsync);
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
string& errmsg,
BSONObjBuilder& result) {
- if (txn->lockState()->isLocked()) {
+ if (opCtx->lockState()->isLocked()) {
errmsg = "fsync: Cannot execute fsync command from contexts that hold a data lock";
return false;
}
@@ -138,23 +138,23 @@ public:
// the simple fsync command case
if (sync) {
// can this be GlobalRead? and if it can, it should be nongreedy.
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite w(txn->lockState());
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite w(opCtx->lockState());
// TODO SERVER-26822: Replace MMAPv1 specific calls with ones that are storage
// engine agnostic.
- getDur().commitNow(txn);
+ getDur().commitNow(opCtx);
// No WriteUnitOfWork needed, as this does no writes of its own.
}
// Take a global IS lock to ensure the storage engine is not shutdown
- Lock::GlobalLock global(txn->lockState(), MODE_IS, UINT_MAX);
+ Lock::GlobalLock global(opCtx->lockState(), MODE_IS, UINT_MAX);
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- result.append("numFiles", storageEngine->flushAllFiles(txn, sync));
+ result.append("numFiles", storageEngine->flushAllFiles(opCtx, sync));
return true;
}
- Lock::ExclusiveLock lk(txn->lockState(), commandMutex);
+ Lock::ExclusiveLock lk(opCtx->lockState(), commandMutex);
if (!sync) {
errmsg = "fsync: sync option must be true when using lock";
return false;
@@ -292,7 +292,7 @@ public:
return isAuthorized ? Status::OK() : Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& db,
BSONObj& cmdObj,
int options,
@@ -300,7 +300,7 @@ public:
BSONObjBuilder& result) override {
log() << "command: unlock requested";
- Lock::ExclusiveLock lk(txn->lockState(), commandMutex);
+ Lock::ExclusiveLock lk(opCtx->lockState(), commandMutex);
if (unlockFsync()) {
const auto lockCount = fsyncCmd.getLockCount();
@@ -343,26 +343,26 @@ void FSyncLockThread::run() {
invariant(fsyncCmd.getLockCount_inLock() == 1);
try {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- ScopedTransaction transaction(&txn, MODE_X);
- Lock::GlobalWrite global(txn.lockState()); // No WriteUnitOfWork needed
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ ScopedTransaction transaction(&opCtx, MODE_X);
+ Lock::GlobalWrite global(opCtx.lockState()); // No WriteUnitOfWork needed
try {
// TODO SERVER-26822: Replace MMAPv1 specific calls with ones that are storage engine
// agnostic.
- getDur().syncDataAndTruncateJournal(&txn);
+ getDur().syncDataAndTruncateJournal(&opCtx);
} catch (const std::exception& e) {
error() << "error doing syncDataAndTruncateJournal: " << e.what();
fsyncCmd.threadStatus = Status(ErrorCodes::CommandFailed, e.what());
fsyncCmd.acquireFsyncLockSyncCV.notify_one();
return;
}
- txn.lockState()->downgradeGlobalXtoSForMMAPV1();
+ opCtx.lockState()->downgradeGlobalXtoSForMMAPV1();
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
try {
- storageEngine->flushAllFiles(&txn, true);
+ storageEngine->flushAllFiles(&opCtx, true);
} catch (const std::exception& e) {
error() << "error doing flushAll: " << e.what();
fsyncCmd.threadStatus = Status(ErrorCodes::CommandFailed, e.what());
@@ -371,9 +371,9 @@ void FSyncLockThread::run() {
}
try {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- uassertStatusOK(storageEngine->beginBackup(&txn));
+ uassertStatusOK(storageEngine->beginBackup(&opCtx));
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(&txn, "beginBackup", "global");
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(&opCtx, "beginBackup", "global");
} catch (const DBException& e) {
error() << "storage engine unable to begin backup : " << e.toString();
fsyncCmd.threadStatus = e.toStatus();
@@ -388,7 +388,7 @@ void FSyncLockThread::run() {
fsyncCmd.releaseFsyncLockSyncCV.wait(lk);
}
- storageEngine->endBackup(&txn);
+ storageEngine->endBackup(&opCtx);
} catch (const std::exception& e) {
severe() << "FSyncLockThread exception: " << e.what();
diff --git a/src/mongo/db/commands/generic.cpp b/src/mongo/db/commands/generic.cpp
index f554498bfbf..f58179be389 100644
--- a/src/mongo/db/commands/generic.cpp
+++ b/src/mongo/db/commands/generic.cpp
@@ -88,7 +88,7 @@ public:
help << "{ buildinfo:1 }";
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& jsobj,
int, // options
@@ -118,7 +118,7 @@ public:
virtual void addRequiredPrivileges(const std::string& dbname,
const BSONObj& cmdObj,
std::vector<Privilege>* out) {} // No auth required
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& badns,
BSONObj& cmdObj,
int,
@@ -144,7 +144,7 @@ public:
virtual void addRequiredPrivileges(const std::string& dbname,
const BSONObj& cmdObj,
std::vector<Privilege>* out) {} // No auth required
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& ns,
BSONObj& cmdObj,
int,
@@ -187,7 +187,7 @@ public:
actions.addAction(ActionType::hostInfo);
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -235,7 +235,7 @@ public:
actions.addAction(ActionType::logRotate);
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& ns,
BSONObj& cmdObj,
int,
@@ -267,7 +267,7 @@ public:
virtual void addRequiredPrivileges(const std::string& dbname,
const BSONObj& cmdObj,
std::vector<Privilege>* out) {} // No auth required
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& ns,
BSONObj& cmdObj,
int,
@@ -366,7 +366,7 @@ public:
const BSONObj& cmdObj,
std::vector<Privilege>* out) {} // No auth required
CmdForceError() : Command("forceerror") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbnamne,
BSONObj& cmdObj,
int,
@@ -401,7 +401,7 @@ public:
help << "{ getLog : '*' } OR { getLog : 'global' }";
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -471,7 +471,7 @@ public:
actions.addAction(ActionType::getCmdLineOpts);
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
diff --git a/src/mongo/db/commands/geo_near_cmd.cpp b/src/mongo/db/commands/geo_near_cmd.cpp
index 159feb2e74f..3c26005121b 100644
--- a/src/mongo/db/commands/geo_near_cmd.cpp
+++ b/src/mongo/db/commands/geo_near_cmd.cpp
@@ -99,7 +99,7 @@ public:
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -111,7 +111,7 @@ public:
}
const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
- AutoGetCollectionForRead ctx(txn, nss);
+ AutoGetCollectionForRead ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
if (!collection) {
@@ -126,7 +126,8 @@ public:
// We seek to populate this.
string nearFieldName;
bool using2DIndex = false;
- if (!getFieldName(txn, collection, indexCatalog, &nearFieldName, &errmsg, &using2DIndex)) {
+ if (!getFieldName(
+ opCtx, collection, indexCatalog, &nearFieldName, &errmsg, &using2DIndex)) {
return false;
}
@@ -224,8 +225,8 @@ public:
qr->setProj(projObj);
qr->setLimit(numWanted);
qr->setCollation(collation);
- const ExtensionsCallbackReal extensionsCallback(txn, &nss);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
+ const ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx, std::move(qr), extensionsCallback);
if (!statusWithCQ.isOK()) {
errmsg = "Can't parse filter / create query";
return false;
@@ -237,7 +238,7 @@ public:
RangePreserver preserver(collection);
auto statusWithPlanExecutor =
- getExecutor(txn, collection, std::move(cq), PlanExecutor::YIELD_AUTO, 0);
+ getExecutor(opCtx, collection, std::move(cq), PlanExecutor::YIELD_AUTO, 0);
if (!statusWithPlanExecutor.isOK()) {
errmsg = "can't get query executor";
return false;
@@ -245,9 +246,9 @@ public:
unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
- auto curOp = CurOp::get(txn);
+ auto curOp = CurOp::get(opCtx);
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
}
@@ -334,7 +335,7 @@ public:
stats.appendIntOrLL("time", curOp->elapsedMicros() / 1000);
stats.done();
- collection->infoCache()->notifyOfQuery(txn, summary.indexesUsed);
+ collection->infoCache()->notifyOfQuery(opCtx, summary.indexesUsed);
curOp->debug().setPlanSummaryMetrics(summary);
@@ -348,7 +349,7 @@ public:
}
private:
- bool getFieldName(OperationContext* txn,
+ bool getFieldName(OperationContext* opCtx,
Collection* collection,
IndexCatalog* indexCatalog,
string* fieldOut,
@@ -357,7 +358,7 @@ private:
vector<IndexDescriptor*> idxs;
// First, try 2d.
- collection->getIndexCatalog()->findIndexByType(txn, IndexNames::GEO_2D, idxs);
+ collection->getIndexCatalog()->findIndexByType(opCtx, IndexNames::GEO_2D, idxs);
if (idxs.size() > 1) {
*errOut = "more than one 2d index, not sure which to run geoNear on";
return false;
@@ -378,7 +379,7 @@ private:
// Next, 2dsphere.
idxs.clear();
- collection->getIndexCatalog()->findIndexByType(txn, IndexNames::GEO_2DSPHERE, idxs);
+ collection->getIndexCatalog()->findIndexByType(opCtx, IndexNames::GEO_2DSPHERE, idxs);
if (0 == idxs.size()) {
*errOut = "no geo indices for geoNear";
return false;
diff --git a/src/mongo/db/commands/get_last_error.cpp b/src/mongo/db/commands/get_last_error.cpp
index e71dcf15210..0340a01139b 100644
--- a/src/mongo/db/commands/get_last_error.cpp
+++ b/src/mongo/db/commands/get_last_error.cpp
@@ -70,13 +70,13 @@ public:
help << "reset error state (used with getpreverror)";
}
CmdResetError() : Command("resetError", false, "reseterror") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& db,
BSONObj& cmdObj,
int,
string& errmsg,
BSONObjBuilder& result) {
- LastError::get(txn->getClient()).reset();
+ LastError::get(opCtx->getClient()).reset();
return true;
}
} cmdResetError;
@@ -104,7 +104,7 @@ public:
<< " { wtimeout:m} - timeout for w in m milliseconds";
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -134,11 +134,11 @@ public:
// err is null.
//
- LastError* le = &LastError::get(txn->getClient());
+ LastError* le = &LastError::get(opCtx->getClient());
le->disable();
// Always append lastOp and connectionId
- Client& c = *txn->getClient();
+ Client& c = *opCtx->getClient();
auto replCoord = repl::getGlobalReplicationCoordinator();
if (replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet) {
const repl::OpTime lastOp = repl::ReplClientInfo::forClient(c).getLastOp();
@@ -224,7 +224,7 @@ public:
// Ensure options are valid for this host. Since getLastError doesn't do writes itself,
// treat it as if these are admin database writes, which need to be replicated so we do
// the strictest checks write concern checks.
- status = validateWriteConcern(txn, writeConcern, NamespaceString::kAdminDb);
+ status = validateWriteConcern(opCtx, writeConcern, NamespaceString::kAdminDb);
}
if (!status.isOK()) {
@@ -267,12 +267,12 @@ public:
}
{
- stdx::lock_guard<Client> lk(*txn->getClient());
- txn->setMessage_inlock("waiting for write concern");
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ opCtx->setMessage_inlock("waiting for write concern");
}
WriteConcernResult wcResult;
- status = waitForWriteConcern(txn, lastOpTime, writeConcern, &wcResult);
+ status = waitForWriteConcern(opCtx, lastOpTime, writeConcern, &wcResult);
wcResult.appendTo(writeConcern, &result);
// For backward compatibility with 2.4, wtimeout returns ok : 1.0
@@ -305,13 +305,13 @@ public:
const BSONObj& cmdObj,
std::vector<Privilege>* out) {} // No auth required
CmdGetPrevError() : Command("getPrevError", false, "getpreverror") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
string& errmsg,
BSONObjBuilder& result) {
- LastError* le = &LastError::get(txn->getClient());
+ LastError* le = &LastError::get(opCtx->getClient());
le->disable();
le->appendSelf(result, true);
if (le->isValid())
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index 4bedfe06e01..ed4b43a81a2 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -144,23 +144,23 @@ public:
request.nss, request.cursorid, request.term.is_initialized());
}
- bool runParsed(OperationContext* txn,
+ bool runParsed(OperationContext* opCtx,
const NamespaceString& origNss,
const GetMoreRequest& request,
BSONObj& cmdObj,
std::string& errmsg,
BSONObjBuilder& result) {
- auto curOp = CurOp::get(txn);
+ auto curOp = CurOp::get(opCtx);
curOp->debug().cursorid = request.cursorid;
// Disable shard version checking - getmore commands are always unversioned
- OperationShardingState::get(txn).setShardVersion(request.nss, ChunkVersion::IGNORED());
+ OperationShardingState::get(opCtx).setShardVersion(request.nss, ChunkVersion::IGNORED());
// Validate term before acquiring locks, if provided.
if (request.term) {
- auto replCoord = repl::ReplicationCoordinator::get(txn);
- Status status = replCoord->updateTerm(txn, *request.term);
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
+ Status status = replCoord->updateTerm(opCtx, *request.term);
// Note: updateTerm returns ok if term stayed the same.
if (!status.isOK()) {
return appendCommandStatus(result, status);
@@ -193,7 +193,7 @@ public:
if (request.nss.isListIndexesCursorNS() || request.nss.isListCollectionsCursorNS()) {
cursorManager = CursorManager::getGlobalCursorManager();
} else {
- ctx = stdx::make_unique<AutoGetCollectionOrViewForRead>(txn, request.nss);
+ ctx = stdx::make_unique<AutoGetCollectionOrViewForRead>(opCtx, request.nss);
auto viewCtx = static_cast<AutoGetCollectionOrViewForRead*>(ctx.get());
Collection* collection = ctx->getCollection();
if (!collection) {
@@ -202,7 +202,7 @@ public:
// unknown, resulting in an appropriate error.
if (viewCtx->getView()) {
auto resolved =
- viewCtx->getDb()->getViewCatalog()->resolveView(txn, request.nss);
+ viewCtx->getDb()->getViewCatalog()->resolveView(opCtx, request.nss);
if (!resolved.isOK()) {
return appendCommandStatus(result, resolved.getStatus());
}
@@ -210,7 +210,7 @@ public:
// Only one shardversion can be set at a time for an operation, so unset it
// here to allow setting it on the underlying namespace.
- OperationShardingState::get(txn).unsetShardVersion(request.nss);
+ OperationShardingState::get(opCtx).unsetShardVersion(request.nss);
GetMoreRequest newRequest(resolved.getValue().getNamespace(),
request.cursorid,
@@ -219,11 +219,11 @@ public:
request.term,
request.lastKnownCommittedOpTime);
- bool retVal = runParsed(txn, origNss, newRequest, cmdObj, errmsg, result);
+ bool retVal = runParsed(opCtx, origNss, newRequest, cmdObj, errmsg, result);
{
// Set the namespace of the curop back to the view namespace so ctx records
// stats on this view namespace on destruction.
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->setNS_inlock(origNss.ns());
}
return retVal;
@@ -251,7 +251,7 @@ public:
invariant(!unpinCollLock);
sleepFor(Milliseconds(10));
ctx.reset();
- ctx = stdx::make_unique<AutoGetCollectionForRead>(txn, request.nss);
+ ctx = stdx::make_unique<AutoGetCollectionForRead>(opCtx, request.nss);
}
if (request.nss.ns() != cursor->ns()) {
@@ -289,15 +289,15 @@ public:
// On early return, get rid of the cursor.
ScopeGuard cursorFreer =
- MakeGuard(&GetMoreCmd::cleanupCursor, txn, &ccPin.getValue(), request);
+ MakeGuard(&GetMoreCmd::cleanupCursor, opCtx, &ccPin.getValue(), request);
if (cursor->isReadCommitted())
- uassertStatusOK(txn->recoveryUnit()->setReadFromMajorityCommittedSnapshot());
+ uassertStatusOK(opCtx->recoveryUnit()->setReadFromMajorityCommittedSnapshot());
// Reset timeout timer on the cursor since the cursor is still in use.
cursor->resetIdleTime();
- const bool hasOwnMaxTime = txn->hasDeadline();
+ const bool hasOwnMaxTime = opCtx->hasDeadline();
if (!hasOwnMaxTime) {
// There is no time limit set directly on this getMore command. If the cursor is
@@ -307,16 +307,16 @@ public:
if (isCursorAwaitData(cursor)) {
uassert(40117,
"Illegal attempt to set operation deadline within DBDirectClient",
- !txn->getClient()->isInDirectClient());
- txn->setDeadlineAfterNowBy(Seconds{1});
+ !opCtx->getClient()->isInDirectClient());
+ opCtx->setDeadlineAfterNowBy(Seconds{1});
} else if (cursor->getLeftoverMaxTimeMicros() < Microseconds::max()) {
uassert(40118,
"Illegal attempt to set operation deadline within DBDirectClient",
- !txn->getClient()->isInDirectClient());
- txn->setDeadlineAfterNowBy(cursor->getLeftoverMaxTimeMicros());
+ !opCtx->getClient()->isInDirectClient());
+ opCtx->setDeadlineAfterNowBy(cursor->getLeftoverMaxTimeMicros());
}
}
- txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
+ opCtx->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
if (cursor->isAggCursor()) {
// Agg cursors handle their own locking internally.
@@ -324,12 +324,12 @@ public:
}
PlanExecutor* exec = cursor->getExecutor();
- exec->reattachToOperationContext(txn);
+ exec->reattachToOperationContext(opCtx);
exec->restoreState();
auto planSummary = Explain::getPlanSummary(exec);
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->setPlanSummary_inlock(planSummary);
// Ensure that the original query or command object is available in the slow query log,
@@ -378,7 +378,7 @@ public:
// If this is an await data cursor, and we hit EOF without generating any results, then
// we block waiting for new data to arrive.
if (isCursorAwaitData(cursor) && state == PlanExecutor::IS_EOF && numResults == 0) {
- auto replCoord = repl::ReplicationCoordinator::get(txn);
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
// Return immediately if we need to update the commit time.
if (!request.lastKnownCommittedOpTime ||
(request.lastKnownCommittedOpTime == replCoord->getLastCommittedOpTime())) {
@@ -393,7 +393,7 @@ public:
ctx.reset();
// Block waiting for data.
- const auto timeout = txn->getRemainingMaxTimeMicros();
+ const auto timeout = opCtx->getRemainingMaxTimeMicros();
notifier->wait(notifierVersion, timeout);
notifier.reset();
@@ -402,7 +402,7 @@ public:
// CappedInsertNotifier.
curOp->setExpectedLatencyMs(durationCount<Milliseconds>(timeout));
- ctx.reset(new AutoGetCollectionForRead(txn, request.nss));
+ ctx.reset(new AutoGetCollectionForRead(opCtx, request.nss));
exec->restoreState();
// We woke up because either the timed_wait expired, or there was more data. Either
@@ -440,7 +440,7 @@ public:
// from a previous find, then don't roll remaining micros over to the next
// getMore.
if (!hasOwnMaxTime) {
- cursor->setLeftoverMaxTimeMicros(txn->getRemainingMaxTimeMicros());
+ cursor->setLeftoverMaxTimeMicros(opCtx->getRemainingMaxTimeMicros());
}
cursor->incPos(numResults);
@@ -463,16 +463,16 @@ public:
// earlier and need to reacquire it in order to clean up our ClientCursorPin.
if (cursor->isAggCursor()) {
invariant(NULL == ctx.get());
- unpinDBLock.reset(new Lock::DBLock(txn->lockState(), request.nss.db(), MODE_IS));
+ unpinDBLock.reset(new Lock::DBLock(opCtx->lockState(), request.nss.db(), MODE_IS));
unpinCollLock.reset(
- new Lock::CollectionLock(txn->lockState(), request.nss.ns(), MODE_IS));
+ new Lock::CollectionLock(opCtx->lockState(), request.nss.ns(), MODE_IS));
}
}
return true;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -481,7 +481,7 @@ public:
// Counted as a getMore, not as a command.
globalOpCounters.gotGetMore();
- if (txn->getClient()->isInDirectClient()) {
+ if (opCtx->getClient()->isInDirectClient()) {
return appendCommandStatus(
result,
Status(ErrorCodes::IllegalOperation, "Cannot run getMore command from eval()"));
@@ -492,7 +492,7 @@ public:
return appendCommandStatus(result, parsedRequest.getStatus());
}
auto request = parsedRequest.getValue();
- return runParsed(txn, request.nss, request, cmdObj, errmsg, result);
+ return runParsed(opCtx, request.nss, request, cmdObj, errmsg, result);
}
/**
@@ -558,7 +558,7 @@ public:
* Called via a ScopeGuard on early return in order to ensure that the ClientCursor gets
* cleaned up properly.
*/
- static void cleanupCursor(OperationContext* txn,
+ static void cleanupCursor(OperationContext* opCtx,
ClientCursorPin* ccPin,
const GetMoreRequest& request) {
ClientCursor* cursor = ccPin->getCursor();
@@ -567,9 +567,9 @@ public:
std::unique_ptr<Lock::CollectionLock> unpinCollLock;
if (cursor->isAggCursor()) {
- unpinDBLock.reset(new Lock::DBLock(txn->lockState(), request.nss.db(), MODE_IS));
+ unpinDBLock.reset(new Lock::DBLock(opCtx->lockState(), request.nss.db(), MODE_IS));
unpinCollLock.reset(
- new Lock::CollectionLock(txn->lockState(), request.nss.ns(), MODE_IS));
+ new Lock::CollectionLock(opCtx->lockState(), request.nss.ns(), MODE_IS));
}
ccPin->deleteUnderlying();
diff --git a/src/mongo/db/commands/group_cmd.cpp b/src/mongo/db/commands/group_cmd.cpp
index 8d91be1f920..cf42c368e75 100644
--- a/src/mongo/db/commands/group_cmd.cpp
+++ b/src/mongo/db/commands/group_cmd.cpp
@@ -120,7 +120,7 @@ private:
return nss.ns();
}
- virtual Status explain(OperationContext* txn,
+ virtual Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -134,11 +134,11 @@ private:
groupRequest.explain = true;
- AutoGetCollectionForRead ctx(txn, groupRequest.ns);
+ AutoGetCollectionForRead ctx(opCtx, groupRequest.ns);
Collection* coll = ctx.getCollection();
auto statusWithPlanExecutor =
- getExecutorGroup(txn, coll, groupRequest, PlanExecutor::YIELD_AUTO);
+ getExecutorGroup(opCtx, coll, groupRequest, PlanExecutor::YIELD_AUTO);
if (!statusWithPlanExecutor.isOK()) {
return statusWithPlanExecutor.getStatus();
}
@@ -149,7 +149,7 @@ private:
return Status::OK();
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -166,20 +166,20 @@ private:
return appendCommandStatus(result, parseRequestStatus);
}
- AutoGetCollectionForRead ctx(txn, groupRequest.ns);
+ AutoGetCollectionForRead ctx(opCtx, groupRequest.ns);
Collection* coll = ctx.getCollection();
auto statusWithPlanExecutor =
- getExecutorGroup(txn, coll, groupRequest, PlanExecutor::YIELD_AUTO);
+ getExecutorGroup(opCtx, coll, groupRequest, PlanExecutor::YIELD_AUTO);
if (!statusWithPlanExecutor.isOK()) {
return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
unique_ptr<PlanExecutor> planExecutor = std::move(statusWithPlanExecutor.getValue());
- auto curOp = CurOp::get(txn);
+ auto curOp = CurOp::get(opCtx);
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->setPlanSummary_inlock(Explain::getPlanSummary(planExecutor.get()));
}
@@ -204,7 +204,7 @@ private:
PlanSummaryStats summaryStats;
Explain::getSummaryStats(*planExecutor, &summaryStats);
if (coll) {
- coll->infoCache()->notifyOfQuery(txn, summaryStats.indexesUsed);
+ coll->infoCache()->notifyOfQuery(opCtx, summaryStats.indexesUsed);
}
curOp->debug().setPlanSummaryMetrics(summaryStats);
diff --git a/src/mongo/db/commands/hashcmd.cpp b/src/mongo/db/commands/hashcmd.cpp
index 76c1960f804..f7e54703898 100644
--- a/src/mongo/db/commands/hashcmd.cpp
+++ b/src/mongo/db/commands/hashcmd.cpp
@@ -79,7 +79,7 @@ public:
*> "out" : NumberLong(6271151123721111923),
*> "ok" : 1 }
**/
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& db,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/db/commands/haystack.cpp b/src/mongo/db/commands/haystack.cpp
index dc44fef0e1d..d760ee9b866 100644
--- a/src/mongo/db/commands/haystack.cpp
+++ b/src/mongo/db/commands/haystack.cpp
@@ -95,7 +95,7 @@ public:
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -103,7 +103,7 @@ public:
BSONObjBuilder& result) {
const NamespaceString nss = parseNsCollectionRequired(dbname, cmdObj);
- AutoGetCollectionForRead ctx(txn, nss);
+ AutoGetCollectionForRead ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
if (!collection) {
@@ -112,7 +112,7 @@ public:
}
vector<IndexDescriptor*> idxs;
- collection->getIndexCatalog()->findIndexByType(txn, IndexNames::GEO_HAYSTACK, idxs);
+ collection->getIndexCatalog()->findIndexByType(opCtx, IndexNames::GEO_HAYSTACK, idxs);
if (idxs.size() == 0) {
errmsg = "no geoSearch index";
return false;
@@ -137,7 +137,7 @@ public:
IndexDescriptor* desc = idxs[0];
HaystackAccessMethod* ham =
static_cast<HaystackAccessMethod*>(collection->getIndexCatalog()->getIndex(desc));
- ham->searchCommand(txn,
+ ham->searchCommand(opCtx,
collection,
nearElt.Obj(),
maxDistance.numberDouble(),
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index 3ec63ba635c..68230d587af 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -61,7 +61,7 @@ using namespace mongo;
/**
* Retrieves a collection's query settings and plan cache from the database.
*/
-static Status getQuerySettingsAndPlanCache(OperationContext* txn,
+static Status getQuerySettingsAndPlanCache(OperationContext* opCtx,
Collection* collection,
const string& ns,
QuerySettings** querySettingsOut,
@@ -115,14 +115,14 @@ using std::unique_ptr;
IndexFilterCommand::IndexFilterCommand(const string& name, const string& helpText)
: Command(name), helpText(helpText) {}
-bool IndexFilterCommand::run(OperationContext* txn,
+bool IndexFilterCommand::run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
- Status status = runIndexFilterCommand(txn, nss.ns(), cmdObj, &result);
+ Status status = runIndexFilterCommand(opCtx, nss.ns(), cmdObj, &result);
return appendCommandStatus(result, status);
}
@@ -160,17 +160,17 @@ ListFilters::ListFilters()
: IndexFilterCommand("planCacheListFilters",
"Displays index filters for all query shapes in a collection.") {}
-Status ListFilters::runIndexFilterCommand(OperationContext* txn,
+Status ListFilters::runIndexFilterCommand(OperationContext* opCtx,
const string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob) {
// This is a read lock. The query settings is owned by the collection.
- AutoGetCollectionForRead ctx(txn, NamespaceString(ns));
+ AutoGetCollectionForRead ctx(opCtx, NamespaceString(ns));
QuerySettings* querySettings;
PlanCache* unused;
Status status =
- getQuerySettingsAndPlanCache(txn, ctx.getCollection(), ns, &querySettings, &unused);
+ getQuerySettingsAndPlanCache(opCtx, ctx.getCollection(), ns, &querySettings, &unused);
if (!status.isOK()) {
// No collection - return empty array of filters.
BSONArrayBuilder hintsBuilder(bob->subarrayStart("filters"));
@@ -228,26 +228,26 @@ ClearFilters::ClearFilters()
"Clears index filter for a single query shape or, "
"if the query shape is omitted, all filters for the collection.") {}
-Status ClearFilters::runIndexFilterCommand(OperationContext* txn,
+Status ClearFilters::runIndexFilterCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob) {
// This is a read lock. The query settings is owned by the collection.
- AutoGetCollectionForRead ctx(txn, NamespaceString(ns));
+ AutoGetCollectionForRead ctx(opCtx, NamespaceString(ns));
QuerySettings* querySettings;
PlanCache* planCache;
Status status =
- getQuerySettingsAndPlanCache(txn, ctx.getCollection(), ns, &querySettings, &planCache);
+ getQuerySettingsAndPlanCache(opCtx, ctx.getCollection(), ns, &querySettings, &planCache);
if (!status.isOK()) {
// No collection - do nothing.
return Status::OK();
}
- return clear(txn, querySettings, planCache, ns, cmdObj);
+ return clear(opCtx, querySettings, planCache, ns, cmdObj);
}
// static
-Status ClearFilters::clear(OperationContext* txn,
+Status ClearFilters::clear(OperationContext* opCtx,
QuerySettings* querySettings,
PlanCache* planCache,
const std::string& ns,
@@ -259,7 +259,7 @@ Status ClearFilters::clear(OperationContext* txn,
// - clear hints for single query shape when a query shape is described in the
// command arguments.
if (cmdObj.hasField("query")) {
- auto statusWithCQ = PlanCacheCommand::canonicalize(txn, ns, cmdObj);
+ auto statusWithCQ = PlanCacheCommand::canonicalize(opCtx, ns, cmdObj);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
@@ -291,7 +291,7 @@ Status ClearFilters::clear(OperationContext* txn,
querySettings->clearAllowedIndices();
const NamespaceString nss(ns);
- const ExtensionsCallbackReal extensionsCallback(txn, &nss);
+ const ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
// Remove corresponding entries from plan cache.
// Admin hints affect the planning process directly. If there were
@@ -312,7 +312,7 @@ Status ClearFilters::clear(OperationContext* txn,
qr->setSort(entry.sort);
qr->setProj(entry.projection);
qr->setCollation(entry.collation);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx, std::move(qr), extensionsCallback);
invariantOK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -329,26 +329,26 @@ SetFilter::SetFilter()
: IndexFilterCommand("planCacheSetFilter",
"Sets index filter for a query shape. Overrides existing filter.") {}
-Status SetFilter::runIndexFilterCommand(OperationContext* txn,
+Status SetFilter::runIndexFilterCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob) {
// This is a read lock. The query settings is owned by the collection.
const NamespaceString nss(ns);
- AutoGetCollectionForRead ctx(txn, nss);
+ AutoGetCollectionForRead ctx(opCtx, nss);
QuerySettings* querySettings;
PlanCache* planCache;
Status status =
- getQuerySettingsAndPlanCache(txn, ctx.getCollection(), ns, &querySettings, &planCache);
+ getQuerySettingsAndPlanCache(opCtx, ctx.getCollection(), ns, &querySettings, &planCache);
if (!status.isOK()) {
return status;
}
- return set(txn, querySettings, planCache, ns, cmdObj);
+ return set(opCtx, querySettings, planCache, ns, cmdObj);
}
// static
-Status SetFilter::set(OperationContext* txn,
+Status SetFilter::set(OperationContext* opCtx,
QuerySettings* querySettings,
PlanCache* planCache,
const string& ns,
@@ -385,7 +385,7 @@ Status SetFilter::set(OperationContext* txn,
}
}
- auto statusWithCQ = PlanCacheCommand::canonicalize(txn, ns, cmdObj);
+ auto statusWithCQ = PlanCacheCommand::canonicalize(opCtx, ns, cmdObj);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
diff --git a/src/mongo/db/commands/index_filter_commands.h b/src/mongo/db/commands/index_filter_commands.h
index c34494b19d8..1fada8269a5 100644
--- a/src/mongo/db/commands/index_filter_commands.h
+++ b/src/mongo/db/commands/index_filter_commands.h
@@ -63,7 +63,7 @@ public:
* implement plan cache command functionality.
*/
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -92,7 +92,7 @@ public:
* Should contain just enough logic to invoke run*Command() function
* in query_settings.h
*/
- virtual Status runIndexFilterCommand(OperationContext* txn,
+ virtual Status runIndexFilterCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob) = 0;
@@ -111,7 +111,7 @@ class ListFilters : public IndexFilterCommand {
public:
ListFilters();
- virtual Status runIndexFilterCommand(OperationContext* txn,
+ virtual Status runIndexFilterCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob);
@@ -133,7 +133,7 @@ class ClearFilters : public IndexFilterCommand {
public:
ClearFilters();
- virtual Status runIndexFilterCommand(OperationContext* txn,
+ virtual Status runIndexFilterCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob);
@@ -144,7 +144,7 @@ public:
* Namespace argument ns is ignored if we are clearing the entire cache.
* Removes corresponding entries from plan cache.
*/
- static Status clear(OperationContext* txn,
+ static Status clear(OperationContext* opCtx,
QuerySettings* querySettings,
PlanCache* planCache,
const std::string& ns,
@@ -167,7 +167,7 @@ class SetFilter : public IndexFilterCommand {
public:
SetFilter();
- virtual Status runIndexFilterCommand(OperationContext* txn,
+ virtual Status runIndexFilterCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob);
@@ -176,7 +176,7 @@ public:
* Sets index filter for a query shape.
* Removes entry for query shape from plan cache.
*/
- static Status set(OperationContext* txn,
+ static Status set(OperationContext* opCtx,
QuerySettings* querySettings,
PlanCache* planCache,
const std::string& ns,
diff --git a/src/mongo/db/commands/index_filter_commands_test.cpp b/src/mongo/db/commands/index_filter_commands_test.cpp
index 0da61155048..218a93be606 100644
--- a/src/mongo/db/commands/index_filter_commands_test.cpp
+++ b/src/mongo/db/commands/index_filter_commands_test.cpp
@@ -119,7 +119,7 @@ PlanRankingDecision* createDecision(size_t numPlans) {
/**
* Injects an entry into plan cache for query shape.
*/
-void addQueryShapeToPlanCache(OperationContext* txn,
+void addQueryShapeToPlanCache(OperationContext* opCtx,
PlanCache* planCache,
const char* queryStr,
const char* sortStr,
@@ -132,7 +132,7 @@ void addQueryShapeToPlanCache(OperationContext* txn,
qr->setProj(fromjson(projectionStr));
qr->setCollation(fromjson(collationStr));
auto statusWithCQ =
- CanonicalQuery::canonicalize(txn, std::move(qr), ExtensionsCallbackDisallowExtensions());
+ CanonicalQuery::canonicalize(opCtx, std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -153,7 +153,7 @@ bool planCacheContains(const PlanCache& planCache,
const char* projectionStr,
const char* collationStr) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Create canonical query.
auto qr = stdx::make_unique<QueryRequest>(nss);
@@ -162,7 +162,7 @@ bool planCacheContains(const PlanCache& planCache,
qr->setProj(fromjson(projectionStr));
qr->setCollation(fromjson(collationStr));
auto statusWithInputQuery = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithInputQuery.getStatus());
unique_ptr<CanonicalQuery> inputQuery = std::move(statusWithInputQuery.getValue());
@@ -183,7 +183,7 @@ bool planCacheContains(const PlanCache& planCache,
qr->setProj(entry->projection);
qr->setCollation(entry->collation);
auto statusWithCurrentQuery = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCurrentQuery.getStatus());
unique_ptr<CanonicalQuery> currentQuery = std::move(statusWithCurrentQuery.getValue());
@@ -213,34 +213,34 @@ TEST(IndexFilterCommandsTest, ListFiltersEmpty) {
TEST(IndexFilterCommandsTest, ClearFiltersInvalidParameter) {
QuerySettings empty;
PlanCache planCache;
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
// If present, query has to be an object.
ASSERT_NOT_OK(
- ClearFilters::clear(&txn, &empty, &planCache, nss.ns(), fromjson("{query: 1234}")));
+ ClearFilters::clear(&opCtx, &empty, &planCache, nss.ns(), fromjson("{query: 1234}")));
// If present, sort must be an object.
ASSERT_NOT_OK(ClearFilters::clear(
- &txn, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, sort: 1234}")));
+ &opCtx, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, sort: 1234}")));
// If present, projection must be an object.
ASSERT_NOT_OK(ClearFilters::clear(
- &txn, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, projection: 1234}")));
+ &opCtx, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, projection: 1234}")));
// Query must pass canonicalization.
ASSERT_NOT_OK(ClearFilters::clear(
- &txn, &empty, &planCache, nss.ns(), fromjson("{query: {a: {$no_such_op: 1}}}")));
+ &opCtx, &empty, &planCache, nss.ns(), fromjson("{query: {a: {$no_such_op: 1}}}")));
// Sort present without query is an error.
ASSERT_NOT_OK(
- ClearFilters::clear(&txn, &empty, &planCache, nss.ns(), fromjson("{sort: {a: 1}}")));
+ ClearFilters::clear(&opCtx, &empty, &planCache, nss.ns(), fromjson("{sort: {a: 1}}")));
// Projection present without query is an error.
ASSERT_NOT_OK(ClearFilters::clear(
- &txn, &empty, &planCache, nss.ns(), fromjson("{projection: {_id: 0, a: 1}}")));
+ &opCtx, &empty, &planCache, nss.ns(), fromjson("{projection: {_id: 0, a: 1}}")));
}
TEST(IndexFilterCommandsTest, ClearNonexistentHint) {
QuerySettings querySettings;
PlanCache planCache;
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
- ASSERT_OK(SetFilter::set(&txn,
+ ASSERT_OK(SetFilter::set(&opCtx,
&querySettings,
&planCache,
nss.ns(),
@@ -251,7 +251,7 @@ TEST(IndexFilterCommandsTest, ClearNonexistentHint) {
// Clear nonexistent hint.
// Command should succeed and cache should remain unchanged.
ASSERT_OK(ClearFilters::clear(
- &txn, &querySettings, &planCache, nss.ns(), fromjson("{query: {b: 1}}")));
+ &opCtx, &querySettings, &planCache, nss.ns(), fromjson("{query: {b: 1}}")));
filters = getFilters(querySettings);
ASSERT_EQUALS(filters.size(), 1U);
}
@@ -263,53 +263,57 @@ TEST(IndexFilterCommandsTest, ClearNonexistentHint) {
TEST(IndexFilterCommandsTest, SetFilterInvalidParameter) {
QuerySettings empty;
PlanCache planCache;
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, nss.ns(), fromjson("{}")));
+ ASSERT_NOT_OK(SetFilter::set(&opCtx, &empty, &planCache, nss.ns(), fromjson("{}")));
// Missing required query field.
ASSERT_NOT_OK(
- SetFilter::set(&txn, &empty, &planCache, nss.ns(), fromjson("{indexes: [{a: 1}]}")));
+ SetFilter::set(&opCtx, &empty, &planCache, nss.ns(), fromjson("{indexes: [{a: 1}]}")));
// Missing required indexes field.
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}}")));
+ ASSERT_NOT_OK(
+ SetFilter::set(&opCtx, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}}")));
// Query has to be an object.
- ASSERT_NOT_OK(SetFilter::set(
- &txn, &empty, &planCache, nss.ns(), fromjson("{query: 1234, indexes: [{a: 1}, {b: 1}]}")));
+ ASSERT_NOT_OK(SetFilter::set(&opCtx,
+ &empty,
+ &planCache,
+ nss.ns(),
+ fromjson("{query: 1234, indexes: [{a: 1}, {b: 1}]}")));
// Indexes field has to be an array.
ASSERT_NOT_OK(SetFilter::set(
- &txn, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, indexes: 1234}")));
+ &opCtx, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, indexes: 1234}")));
// Array indexes field cannot empty.
ASSERT_NOT_OK(SetFilter::set(
- &txn, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, indexes: []}")));
+ &opCtx, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, indexes: []}")));
// Elements in indexes have to be objects.
ASSERT_NOT_OK(SetFilter::set(
- &txn, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, indexes: [{a: 1}, 99]}")));
+ &opCtx, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, indexes: [{a: 1}, 99]}")));
// Objects in indexes cannot be empty.
ASSERT_NOT_OK(SetFilter::set(
- &txn, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, indexes: [{a: 1}, {}]}")));
+ &opCtx, &empty, &planCache, nss.ns(), fromjson("{query: {a: 1}, indexes: [{a: 1}, {}]}")));
// If present, sort must be an object.
ASSERT_NOT_OK(
- SetFilter::set(&txn,
+ SetFilter::set(&opCtx,
&empty,
&planCache,
nss.ns(),
fromjson("{query: {a: 1}, sort: 1234, indexes: [{a: 1}, {b: 1}]}")));
// If present, projection must be an object.
ASSERT_NOT_OK(
- SetFilter::set(&txn,
+ SetFilter::set(&opCtx,
&empty,
&planCache,
nss.ns(),
fromjson("{query: {a: 1}, projection: 1234, indexes: [{a: 1}, {b: 1}]}")));
// If present, collation must be an object.
ASSERT_NOT_OK(
- SetFilter::set(&txn,
+ SetFilter::set(&opCtx,
&empty,
&planCache,
nss.ns(),
fromjson("{query: {a: 1}, collation: 1234, indexes: [{a: 1}, {b: 1}]}")));
// Query must pass canonicalization.
ASSERT_NOT_OK(
- SetFilter::set(&txn,
+ SetFilter::set(&opCtx,
&empty,
&planCache,
nss.ns(),
@@ -320,10 +324,10 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
QuerySettings querySettings;
PlanCache planCache;
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Inject query shape into plan cache.
- addQueryShapeToPlanCache(txn.get(),
+ addQueryShapeToPlanCache(opCtx.get(),
&planCache,
"{a: 1, b: 1}",
"{a: -1}",
@@ -332,7 +336,7 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
ASSERT_TRUE(planCacheContains(
planCache, "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}", "{locale: 'mock_reverse_string'}"));
- ASSERT_OK(SetFilter::set(txn.get(),
+ ASSERT_OK(SetFilter::set(opCtx.get(),
&querySettings,
&planCache,
nss.ns(),
@@ -355,7 +359,7 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
// Replacing the hint for the same query shape ({a: 1, b: 1} and {b: 2, a: 3}
// share same shape) should not change the query settings size.
- ASSERT_OK(SetFilter::set(txn.get(),
+ ASSERT_OK(SetFilter::set(opCtx.get(),
&querySettings,
&planCache,
nss.ns(),
@@ -371,7 +375,7 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
ASSERT_BSONOBJ_EQ(filterArray[0].Obj(), fromjson("{a: 1, b: 1}"));
// Add hint for different query shape.
- ASSERT_OK(SetFilter::set(txn.get(),
+ ASSERT_OK(SetFilter::set(opCtx.get(),
&querySettings,
&planCache,
nss.ns(),
@@ -380,7 +384,7 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
ASSERT_EQUALS(filters.size(), 2U);
// Add hint for 3rd query shape. This is to prepare for ClearHint tests.
- ASSERT_OK(SetFilter::set(txn.get(),
+ ASSERT_OK(SetFilter::set(opCtx.get(),
&querySettings,
&planCache,
nss.ns(),
@@ -389,12 +393,12 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
ASSERT_EQUALS(filters.size(), 3U);
// Add 2 entries to plan cache and check plan cache after clearing one/all filters.
- addQueryShapeToPlanCache(txn.get(), &planCache, "{a: 1}", "{}", "{}", "{}");
- addQueryShapeToPlanCache(txn.get(), &planCache, "{b: 1}", "{}", "{}", "{}");
+ addQueryShapeToPlanCache(opCtx.get(), &planCache, "{a: 1}", "{}", "{}", "{}");
+ addQueryShapeToPlanCache(opCtx.get(), &planCache, "{b: 1}", "{}", "{}", "{}");
// Clear single hint.
ASSERT_OK(ClearFilters::clear(
- txn.get(), &querySettings, &planCache, nss.ns(), fromjson("{query: {a: 1}}")));
+ opCtx.get(), &querySettings, &planCache, nss.ns(), fromjson("{query: {a: 1}}")));
filters = getFilters(querySettings);
ASSERT_EQUALS(filters.size(), 2U);
@@ -403,7 +407,8 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
ASSERT_TRUE(planCacheContains(planCache, "{b: 1}", "{}", "{}", "{}"));
// Clear all filters
- ASSERT_OK(ClearFilters::clear(txn.get(), &querySettings, &planCache, nss.ns(), fromjson("{}")));
+ ASSERT_OK(
+ ClearFilters::clear(opCtx.get(), &querySettings, &planCache, nss.ns(), fromjson("{}")));
filters = getFilters(querySettings);
ASSERT_TRUE(filters.empty());
@@ -413,7 +418,7 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
TEST(IndexFilterCommandsTest, SetAndClearFiltersCollation) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
QuerySettings querySettings;
// Create a plan cache. Add an index so that indexability is included in the plan cache keys.
@@ -423,13 +428,13 @@ TEST(IndexFilterCommandsTest, SetAndClearFiltersCollation) {
// Inject query shapes with and without collation into plan cache.
addQueryShapeToPlanCache(
- txn.get(), &planCache, "{a: 'foo'}", "{}", "{}", "{locale: 'mock_reverse_string'}");
- addQueryShapeToPlanCache(txn.get(), &planCache, "{a: 'foo'}", "{}", "{}", "{}");
+ opCtx.get(), &planCache, "{a: 'foo'}", "{}", "{}", "{locale: 'mock_reverse_string'}");
+ addQueryShapeToPlanCache(opCtx.get(), &planCache, "{a: 'foo'}", "{}", "{}", "{}");
ASSERT_TRUE(
planCacheContains(planCache, "{a: 'foo'}", "{}", "{}", "{locale: 'mock_reverse_string'}"));
ASSERT_TRUE(planCacheContains(planCache, "{a: 'foo'}", "{}", "{}", "{}"));
- ASSERT_OK(SetFilter::set(txn.get(),
+ ASSERT_OK(SetFilter::set(opCtx.get(),
&querySettings,
&planCache,
nss.ns(),
@@ -450,7 +455,7 @@ TEST(IndexFilterCommandsTest, SetAndClearFiltersCollation) {
ASSERT_TRUE(planCacheContains(planCache, "{a: 'foo'}", "{}", "{}", "{}"));
// Add filter for query shape without collation.
- ASSERT_OK(SetFilter::set(txn.get(),
+ ASSERT_OK(SetFilter::set(opCtx.get(),
&querySettings,
&planCache,
nss.ns(),
@@ -460,12 +465,12 @@ TEST(IndexFilterCommandsTest, SetAndClearFiltersCollation) {
// Add plan cache entries for both queries.
addQueryShapeToPlanCache(
- txn.get(), &planCache, "{a: 'foo'}", "{}", "{}", "{locale: 'mock_reverse_string'}");
- addQueryShapeToPlanCache(txn.get(), &planCache, "{a: 'foo'}", "{}", "{}", "{}");
+ opCtx.get(), &planCache, "{a: 'foo'}", "{}", "{}", "{locale: 'mock_reverse_string'}");
+ addQueryShapeToPlanCache(opCtx.get(), &planCache, "{a: 'foo'}", "{}", "{}", "{}");
// Clear filter for query with collation.
ASSERT_OK(ClearFilters::clear(
- txn.get(),
+ opCtx.get(),
&querySettings,
&planCache,
nss.ns(),
@@ -490,7 +495,7 @@ TEST(IndexFilterCommandsTest, SetFilterAcceptsIndexNames) {
fromjson("{a: 1}"), false, false, false, "a_1:rev", nullptr, BSONObj());
collatedIndex.collator = &reverseCollator;
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
QuerySettings querySettings;
PlanCache planCache;
@@ -498,10 +503,10 @@ TEST(IndexFilterCommandsTest, SetFilterAcceptsIndexNames) {
{IndexEntry(fromjson("{a: 1}"), false, false, false, "a_1", nullptr, BSONObj()),
collatedIndex});
- addQueryShapeToPlanCache(txn.get(), &planCache, "{a: 2}", "{}", "{}", "{}");
+ addQueryShapeToPlanCache(opCtx.get(), &planCache, "{a: 2}", "{}", "{}", "{}");
ASSERT_TRUE(planCacheContains(planCache, "{a: 2}", "{}", "{}", "{}"));
- ASSERT_OK(SetFilter::set(txn.get(),
+ ASSERT_OK(SetFilter::set(opCtx.get(),
&querySettings,
&planCache,
nss.ns(),
diff --git a/src/mongo/db/commands/isself.cpp b/src/mongo/db/commands/isself.cpp
index 0db7ba01440..6e8c1509b5f 100644
--- a/src/mongo/db/commands/isself.cpp
+++ b/src/mongo/db/commands/isself.cpp
@@ -54,7 +54,7 @@ public:
virtual void addRequiredPrivileges(const std::string& dbname,
const BSONObj& cmdObj,
std::vector<Privilege>* out) {} // No auth required
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
diff --git a/src/mongo/db/commands/kill_op.cpp b/src/mongo/db/commands/kill_op.cpp
index f96f3692ecd..1d16113ca93 100644
--- a/src/mongo/db/commands/kill_op.cpp
+++ b/src/mongo/db/commands/kill_op.cpp
@@ -128,7 +128,7 @@ public:
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& db,
BSONObj& cmdObj,
int options,
@@ -138,12 +138,12 @@ public:
log() << "going to kill op: " << opId;
result.append("info", "attempting to kill op");
- auto swLkAndOp = _findOp(txn->getClient(), opId);
+ auto swLkAndOp = _findOp(opCtx->getClient(), opId);
if (swLkAndOp.isOK()) {
stdx::unique_lock<Client> lk;
- OperationContext* opCtx;
- std::tie(lk, opCtx) = std::move(swLkAndOp.getValue());
- txn->getServiceContext()->killOperation(opCtx);
+ OperationContext* opCtxToKill;
+ std::tie(lk, opCtxToKill) = std::move(swLkAndOp.getValue());
+ opCtx->getServiceContext()->killOperation(opCtxToKill);
}
return true;
diff --git a/src/mongo/db/commands/killcursors_cmd.cpp b/src/mongo/db/commands/killcursors_cmd.cpp
index 5831d3b2cc0..e51e4d65f8a 100644
--- a/src/mongo/db/commands/killcursors_cmd.cpp
+++ b/src/mongo/db/commands/killcursors_cmd.cpp
@@ -45,7 +45,9 @@ public:
KillCursorsCmd() = default;
private:
- Status _killCursor(OperationContext* txn, const NamespaceString& nss, CursorId cursorId) final {
+ Status _killCursor(OperationContext* opCtx,
+ const NamespaceString& nss,
+ CursorId cursorId) final {
std::unique_ptr<AutoGetCollectionOrViewForRead> ctx;
CursorManager* cursorManager;
@@ -55,22 +57,22 @@ private:
// data within a collection.
cursorManager = CursorManager::getGlobalCursorManager();
} else {
- ctx = stdx::make_unique<AutoGetCollectionOrViewForRead>(txn, nss);
+ ctx = stdx::make_unique<AutoGetCollectionOrViewForRead>(opCtx, nss);
Collection* collection = ctx->getCollection();
ViewDefinition* view = ctx->getView();
if (view) {
Database* db = ctx->getDb();
- auto resolved = db->getViewCatalog()->resolveView(txn, nss);
+ auto resolved = db->getViewCatalog()->resolveView(opCtx, nss);
if (!resolved.isOK()) {
return resolved.getStatus();
}
ctx->releaseLocksForView();
- Status status = _killCursor(txn, resolved.getValue().getNamespace(), cursorId);
+ Status status = _killCursor(opCtx, resolved.getValue().getNamespace(), cursorId);
{
// Set the namespace of the curop back to the view namespace so ctx records
// stats on this view namespace on destruction.
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setNS_inlock(nss.ns());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setNS_inlock(nss.ns());
}
return status;
}
@@ -82,7 +84,7 @@ private:
}
invariant(cursorManager);
- return cursorManager->eraseCursor(txn, cursorId, true /*shouldAudit*/);
+ return cursorManager->eraseCursor(opCtx, cursorId, true /*shouldAudit*/);
}
} killCursorsCmd;
diff --git a/src/mongo/db/commands/killcursors_common.cpp b/src/mongo/db/commands/killcursors_common.cpp
index 194882feee2..570c1e1df0e 100644
--- a/src/mongo/db/commands/killcursors_common.cpp
+++ b/src/mongo/db/commands/killcursors_common.cpp
@@ -63,7 +63,7 @@ Status KillCursorsCmdBase::checkAuthForCommand(Client* client,
return Status::OK();
}
-bool KillCursorsCmdBase::run(OperationContext* txn,
+bool KillCursorsCmdBase::run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -81,7 +81,7 @@ bool KillCursorsCmdBase::run(OperationContext* txn,
std::vector<CursorId> cursorsUnknown;
for (CursorId id : killCursorsRequest.cursorIds) {
- Status status = _killCursor(txn, killCursorsRequest.nss, id);
+ Status status = _killCursor(opCtx, killCursorsRequest.nss, id);
if (status.isOK()) {
cursorsKilled.push_back(id);
} else if (status.code() == ErrorCodes::CursorNotFound) {
@@ -91,7 +91,7 @@ bool KillCursorsCmdBase::run(OperationContext* txn,
}
audit::logKillCursorsAuthzCheck(
- txn->getClient(), killCursorsRequest.nss, id, status.code());
+ opCtx->getClient(), killCursorsRequest.nss, id, status.code());
}
KillCursorsResponse killCursorsResponse(
diff --git a/src/mongo/db/commands/killcursors_common.h b/src/mongo/db/commands/killcursors_common.h
index 3f66f845ef0..c5b6e9db31d 100644
--- a/src/mongo/db/commands/killcursors_common.h
+++ b/src/mongo/db/commands/killcursors_common.h
@@ -70,7 +70,7 @@ public:
const std::string& dbname,
const BSONObj& cmdObj) final;
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -79,12 +79,12 @@ public:
private:
/**
- * Kill the cursor with id 'cursorId' in namespace 'nss'. Use 'txn' if necessary.
+ * Kill the cursor with id 'cursorId' in namespace 'nss'. Use 'opCtx' if necessary.
*
* Returns Status::OK() if the cursor was killed, or ErrorCodes::CursorNotFound if there is no
* such cursor, or ErrorCodes::OperationFailed if the cursor cannot be killed.
*/
- virtual Status _killCursor(OperationContext* txn,
+ virtual Status _killCursor(OperationContext* opCtx,
const NamespaceString& nss,
CursorId cursorId) = 0;
};
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index 0309abe8c7c..68a6337f548 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -111,7 +111,7 @@ boost::optional<vector<StringData>> _getExactNameMatches(const MatchExpression*
*
* Does not add any information about the system.namespaces collection, or non-existent collections.
*/
-void _addWorkingSetMember(OperationContext* txn,
+void _addWorkingSetMember(OperationContext* opCtx,
const BSONObj& maybe,
const MatchExpression* matcher,
WorkingSet* ws,
@@ -147,7 +147,7 @@ BSONObj buildViewBson(const ViewDefinition& view) {
return b.obj();
}
-BSONObj buildCollectionBson(OperationContext* txn, const Collection* collection) {
+BSONObj buildCollectionBson(OperationContext* opCtx, const Collection* collection) {
if (!collection) {
return {};
@@ -162,13 +162,13 @@ BSONObj buildCollectionBson(OperationContext* txn, const Collection* collection)
b.append("name", collectionName);
b.append("type", "collection");
- CollectionOptions options = collection->getCatalogEntry()->getCollectionOptions(txn);
+ CollectionOptions options = collection->getCatalogEntry()->getCollectionOptions(opCtx);
b.append("options", options.toBSON());
BSONObj info = BSON("readOnly" << storageGlobalParams.readOnly);
b.append("info", info);
- auto idIndex = collection->getIndexCatalog()->findIdIndex(txn);
+ auto idIndex = collection->getIndexCatalog()->findIdIndex(opCtx);
if (idIndex) {
b.append("idIndex", idIndex->infoObj());
}
@@ -216,7 +216,7 @@ public:
CmdListCollections() : Command("listCollections") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int,
@@ -247,29 +247,29 @@ public:
return appendCommandStatus(result, parseCursorStatus);
}
- ScopedTransaction scopedXact(txn, MODE_IS);
- AutoGetDb autoDb(txn, dbname, MODE_S);
+ ScopedTransaction scopedXact(opCtx, MODE_IS);
+ AutoGetDb autoDb(opCtx, dbname, MODE_S);
Database* db = autoDb.getDb();
auto ws = make_unique<WorkingSet>();
- auto root = make_unique<QueuedDataStage>(txn, ws.get());
+ auto root = make_unique<QueuedDataStage>(opCtx, ws.get());
if (db) {
if (auto collNames = _getExactNameMatches(matcher.get())) {
for (auto&& collName : *collNames) {
auto nss = NamespaceString(db->name(), collName);
Collection* collection = db->getCollection(nss);
- BSONObj collBson = buildCollectionBson(txn, collection);
+ BSONObj collBson = buildCollectionBson(opCtx, collection);
if (!collBson.isEmpty()) {
- _addWorkingSetMember(txn, collBson, matcher.get(), ws.get(), root.get());
+ _addWorkingSetMember(opCtx, collBson, matcher.get(), ws.get(), root.get());
}
}
} else {
for (auto&& collection : *db) {
- BSONObj collBson = buildCollectionBson(txn, collection);
+ BSONObj collBson = buildCollectionBson(opCtx, collection);
if (!collBson.isEmpty()) {
- _addWorkingSetMember(txn, collBson, matcher.get(), ws.get(), root.get());
+ _addWorkingSetMember(opCtx, collBson, matcher.get(), ws.get(), root.get());
}
}
}
@@ -279,10 +279,10 @@ public:
SimpleBSONObjComparator::kInstance.evaluate(
filterElt.Obj() == ListCollectionsFilter::makeTypeCollectionFilter());
if (!skipViews) {
- db->getViewCatalog()->iterate(txn, [&](const ViewDefinition& view) {
+ db->getViewCatalog()->iterate(opCtx, [&](const ViewDefinition& view) {
BSONObj viewBson = buildViewBson(view);
if (!viewBson.isEmpty()) {
- _addWorkingSetMember(txn, viewBson, matcher.get(), ws.get(), root.get());
+ _addWorkingSetMember(opCtx, viewBson, matcher.get(), ws.get(), root.get());
}
});
}
@@ -291,7 +291,7 @@ public:
const NamespaceString cursorNss = NamespaceString::makeListCollectionsNSS(dbname);
auto statusWithPlanExecutor = PlanExecutor::make(
- txn, std::move(ws), std::move(root), cursorNss.ns(), PlanExecutor::YIELD_MANUAL);
+ opCtx, std::move(ws), std::move(root), cursorNss.ns(), PlanExecutor::YIELD_MANUAL);
if (!statusWithPlanExecutor.isOK()) {
return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
@@ -323,7 +323,7 @@ public:
auto pinnedCursor = CursorManager::getGlobalCursorManager()->registerCursor(
{exec.release(),
cursorNss.ns(),
- txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot()});
+ opCtx->recoveryUnit()->isReadingFromMajorityCommittedSnapshot()});
cursorId = pinnedCursor.getCursor()->cursorid();
}
diff --git a/src/mongo/db/commands/list_databases.cpp b/src/mongo/db/commands/list_databases.cpp
index 08de6f6cd6f..ccc2f82cc49 100644
--- a/src/mongo/db/commands/list_databases.cpp
+++ b/src/mongo/db/commands/list_databases.cpp
@@ -83,7 +83,7 @@ public:
CmdListDatabases() : Command("listDatabases", true) {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int,
@@ -114,8 +114,8 @@ public:
vector<string> dbNames;
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
{
- ScopedTransaction transaction(txn, MODE_IS);
- Lock::GlobalLock lk(txn->lockState(), MODE_IS, UINT_MAX);
+ ScopedTransaction transaction(opCtx, MODE_IS);
+ Lock::GlobalLock lk(opCtx->lockState(), MODE_IS, UINT_MAX);
storageEngine->listDatabases(&dbNames);
}
@@ -135,17 +135,17 @@ public:
if (filterNameOnly && !filter->matchesBSON(b.asTempObj()))
continue;
- ScopedTransaction transaction(txn, MODE_IS);
- Lock::DBLock dbLock(txn->lockState(), dbname, MODE_IS);
+ ScopedTransaction transaction(opCtx, MODE_IS);
+ Lock::DBLock dbLock(opCtx->lockState(), dbname, MODE_IS);
- Database* db = dbHolder().get(txn, dbname);
+ Database* db = dbHolder().get(opCtx, dbname);
if (!db)
continue;
const DatabaseCatalogEntry* entry = db->getDatabaseCatalogEntry();
invariant(entry);
- size = entry->sizeOnDisk(txn);
+ size = entry->sizeOnDisk(opCtx);
b.append("sizeOnDisk", static_cast<double>(size));
b.appendBool("empty", entry->isEmpty());
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index 59528d8bf11..940edb13eda 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -116,7 +116,7 @@ public:
CmdListIndexes() : Command("listIndexes") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -132,7 +132,7 @@ public:
return appendCommandStatus(result, parseCursorStatus);
}
- AutoGetCollectionForRead autoColl(txn, ns);
+ AutoGetCollectionForRead autoColl(opCtx, ns);
if (!autoColl.getDb()) {
return appendCommandStatus(result,
Status(ErrorCodes::NamespaceNotFound, "no database"));
@@ -150,19 +150,19 @@ public:
vector<string> indexNames;
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
indexNames.clear();
- cce->getAllIndexes(txn, &indexNames);
+ cce->getAllIndexes(opCtx, &indexNames);
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "listIndexes", ns.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "listIndexes", ns.ns());
auto ws = make_unique<WorkingSet>();
- auto root = make_unique<QueuedDataStage>(txn, ws.get());
+ auto root = make_unique<QueuedDataStage>(opCtx, ws.get());
for (size_t i = 0; i < indexNames.size(); i++) {
BSONObj indexSpec;
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- indexSpec = cce->getIndexSpec(txn, indexNames[i]);
+ indexSpec = cce->getIndexSpec(opCtx, indexNames[i]);
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "listIndexes", ns.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "listIndexes", ns.ns());
if (ns.ns() == FeatureCompatibilityVersion::kCollection &&
indexNames[i] == FeatureCompatibilityVersion::k32IncompatibleIndexName) {
@@ -198,7 +198,7 @@ public:
dassert(ns == cursorNss.getTargetNSForListIndexes());
auto statusWithPlanExecutor = PlanExecutor::make(
- txn, std::move(ws), std::move(root), cursorNss.ns(), PlanExecutor::YIELD_MANUAL);
+ opCtx, std::move(ws), std::move(root), cursorNss.ns(), PlanExecutor::YIELD_MANUAL);
if (!statusWithPlanExecutor.isOK()) {
return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
}
@@ -230,7 +230,7 @@ public:
auto pinnedCursor = CursorManager::getGlobalCursorManager()->registerCursor(
{exec.release(),
cursorNss.ns(),
- txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot()});
+ opCtx->recoveryUnit()->isReadingFromMajorityCommittedSnapshot()});
cursorId = pinnedCursor.getCursor()->cursorid();
}
diff --git a/src/mongo/db/commands/lock_info.cpp b/src/mongo/db/commands/lock_info.cpp
index 950533ae333..69dd15a6c0b 100644
--- a/src/mongo/db/commands/lock_info.cpp
+++ b/src/mongo/db/commands/lock_info.cpp
@@ -79,7 +79,7 @@ public:
CmdLockInfo() : Command("lockInfo", true) {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int,
@@ -87,7 +87,7 @@ public:
BSONObjBuilder& result) {
std::map<LockerId, BSONObj> lockToClientMap;
- for (ServiceContext::LockedClientsCursor cursor(txn->getClient()->getServiceContext());
+ for (ServiceContext::LockedClientsCursor cursor(opCtx->getClient()->getServiceContext());
Client* client = cursor.next();) {
invariant(client);
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 781c0d1d5af..25e9590a6d9 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -372,40 +372,40 @@ Config::Config(const string& _dbname, const BSONObj& cmdObj) {
void State::dropTempCollections() {
if (!_config.tempNamespace.isEmpty()) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction scopedXact(_txn, MODE_IX);
- AutoGetDb autoDb(_txn, _config.tempNamespace.db(), MODE_X);
+ ScopedTransaction scopedXact(_opCtx, MODE_IX);
+ AutoGetDb autoDb(_opCtx, _config.tempNamespace.db(), MODE_X);
if (auto db = autoDb.getDb()) {
- WriteUnitOfWork wunit(_txn);
+ WriteUnitOfWork wunit(_opCtx);
uassert(ErrorCodes::PrimarySteppedDown,
"no longer primary",
repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(
- _txn, _config.tempNamespace));
- db->dropCollection(_txn, _config.tempNamespace.ns());
+ _opCtx, _config.tempNamespace));
+ db->dropCollection(_opCtx, _config.tempNamespace.ns());
wunit.commit();
}
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
- _txn, "M/R dropTempCollections", _config.tempNamespace.ns())
+ _opCtx, "M/R dropTempCollections", _config.tempNamespace.ns())
// Always forget about temporary namespaces, so we don't cache lots of them
ShardConnection::forgetNS(_config.tempNamespace.ns());
}
if (_useIncremental && !_config.incLong.isEmpty()) {
// We don't want to log the deletion of incLong as it isn't replicated. While
// harmless, this would lead to a scary looking warning on the secondaries.
- bool shouldReplicateWrites = _txn->writesAreReplicated();
- _txn->setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _txn, shouldReplicateWrites);
+ bool shouldReplicateWrites = _opCtx->writesAreReplicated();
+ _opCtx->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _opCtx, shouldReplicateWrites);
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction scopedXact(_txn, MODE_IX);
- Lock::DBLock lk(_txn->lockState(), _config.incLong.db(), MODE_X);
- if (Database* db = dbHolder().get(_txn, _config.incLong.ns())) {
- WriteUnitOfWork wunit(_txn);
- db->dropCollection(_txn, _config.incLong.ns());
+ ScopedTransaction scopedXact(_opCtx, MODE_IX);
+ Lock::DBLock lk(_opCtx->lockState(), _config.incLong.db(), MODE_X);
+ if (Database* db = dbHolder().get(_opCtx, _config.incLong.ns())) {
+ WriteUnitOfWork wunit(_opCtx);
+ db->dropCollection(_opCtx, _config.incLong.ns());
wunit.commit();
}
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_txn, "M/R dropTempCollections", _config.incLong.ns())
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_opCtx, "M/R dropTempCollections", _config.incLong.ns())
ShardConnection::forgetNS(_config.incLong.ns());
}
@@ -422,20 +422,20 @@ void State::prepTempCollection() {
if (_useIncremental) {
// Create the inc collection and make sure we have index on "0" key.
// Intentionally not replicating the inc collection to secondaries.
- bool shouldReplicateWrites = _txn->writesAreReplicated();
- _txn->setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _txn, shouldReplicateWrites);
+ bool shouldReplicateWrites = _opCtx->writesAreReplicated();
+ _opCtx->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _opCtx, shouldReplicateWrites);
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- OldClientWriteContext incCtx(_txn, _config.incLong.ns());
- WriteUnitOfWork wuow(_txn);
+ OldClientWriteContext incCtx(_opCtx, _config.incLong.ns());
+ WriteUnitOfWork wuow(_opCtx);
Collection* incColl = incCtx.getCollection();
invariant(!incColl);
CollectionOptions options;
options.setNoIdIndex();
options.temp = true;
- incColl = incCtx.db()->createCollection(_txn, _config.incLong.ns(), options);
+ incColl = incCtx.db()->createCollection(_opCtx, _config.incLong.ns(), options);
invariant(incColl);
// We explicitly create a v=2 index on the "0" field so that it is always possible for a
@@ -448,7 +448,7 @@ void State::prepTempCollection() {
<< "v"
<< static_cast<int>(IndexVersion::kV2));
Status status = incColl->getIndexCatalog()
- ->createIndexOnEmptyCollection(_txn, indexSpec)
+ ->createIndexOnEmptyCollection(_opCtx, indexSpec)
.getStatus();
if (!status.isOK()) {
uasserted(17305,
@@ -459,7 +459,7 @@ void State::prepTempCollection() {
}
wuow.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_txn, "M/R prepTempCollection", _config.incLong.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_opCtx, "M/R prepTempCollection", _config.incLong.ns());
}
CollectionOptions finalOptions;
@@ -467,13 +467,13 @@ void State::prepTempCollection() {
{
// copy indexes and collection options into temporary storage
- OldClientWriteContext finalCtx(_txn, _config.outputOptions.finalNamespace.ns());
+ OldClientWriteContext finalCtx(_opCtx, _config.outputOptions.finalNamespace.ns());
Collection* const finalColl = finalCtx.getCollection();
if (finalColl) {
- finalOptions = finalColl->getCatalogEntry()->getCollectionOptions(_txn);
+ finalOptions = finalColl->getCatalogEntry()->getCollectionOptions(_opCtx);
IndexCatalog::IndexIterator ii =
- finalColl->getIndexCatalog()->getIndexIterator(_txn, true);
+ finalColl->getIndexCatalog()->getIndexIterator(_opCtx, true);
// Iterate over finalColl's indexes.
while (ii.more()) {
IndexDescriptor* currIndex = ii.next();
@@ -495,23 +495,23 @@ void State::prepTempCollection() {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
// create temp collection and insert the indexes from temporary storage
- OldClientWriteContext tempCtx(_txn, _config.tempNamespace.ns());
- WriteUnitOfWork wuow(_txn);
+ OldClientWriteContext tempCtx(_opCtx, _config.tempNamespace.ns());
+ WriteUnitOfWork wuow(_opCtx);
uassert(ErrorCodes::PrimarySteppedDown,
"no longer primary",
- repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(_txn,
+ repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(_opCtx,
_config.tempNamespace));
Collection* tempColl = tempCtx.getCollection();
invariant(!tempColl);
CollectionOptions options = finalOptions;
options.temp = true;
- tempColl = tempCtx.db()->createCollection(_txn, _config.tempNamespace.ns(), options);
+ tempColl = tempCtx.db()->createCollection(_opCtx, _config.tempNamespace.ns(), options);
for (vector<BSONObj>::iterator it = indexesToInsert.begin(); it != indexesToInsert.end();
++it) {
Status status =
- tempColl->getIndexCatalog()->createIndexOnEmptyCollection(_txn, *it).getStatus();
+ tempColl->getIndexCatalog()->createIndexOnEmptyCollection(_opCtx, *it).getStatus();
if (!status.isOK()) {
if (status.code() == ErrorCodes::IndexAlreadyExists) {
continue;
@@ -520,11 +520,12 @@ void State::prepTempCollection() {
}
// Log the createIndex operation.
string logNs = _config.tempNamespace.db() + ".system.indexes";
- getGlobalServiceContext()->getOpObserver()->onCreateIndex(_txn, logNs, *it, false);
+ getGlobalServiceContext()->getOpObserver()->onCreateIndex(_opCtx, logNs, *it, false);
}
wuow.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_txn, "M/R prepTempCollection", _config.tempNamespace.ns())
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
+ _opCtx, "M/R prepTempCollection", _config.tempNamespace.ns())
}
/**
@@ -605,7 +606,7 @@ void State::appendResults(BSONObjBuilder& final) {
* Does post processing on output collection.
* This may involve replacing, merging or reducing.
*/
-long long State::postProcessCollection(OperationContext* txn,
+long long State::postProcessCollection(OperationContext* opCtx,
CurOp* curOp,
ProgressMeterHolder& pm) {
if (_onDisk == false || _config.outputOptions.outType == Config::INMEMORY)
@@ -613,22 +614,22 @@ long long State::postProcessCollection(OperationContext* txn,
bool holdingGlobalLock = false;
if (_config.outputOptions.outNonAtomic)
- return postProcessCollectionNonAtomic(txn, curOp, pm, holdingGlobalLock);
+ return postProcessCollectionNonAtomic(opCtx, curOp, pm, holdingGlobalLock);
- invariant(!txn->lockState()->isLocked());
+ invariant(!opCtx->lockState()->isLocked());
- ScopedTransaction transaction(txn, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_X);
// This must be global because we may write across different databases.
- Lock::GlobalWrite lock(txn->lockState());
+ Lock::GlobalWrite lock(opCtx->lockState());
holdingGlobalLock = true;
- return postProcessCollectionNonAtomic(txn, curOp, pm, holdingGlobalLock);
+ return postProcessCollectionNonAtomic(opCtx, curOp, pm, holdingGlobalLock);
}
namespace {
// Runs a count against the namespace specified by 'ns'. If the caller holds the global write lock,
// then this function does not acquire any additional locks.
-unsigned long long _collectionCount(OperationContext* txn,
+unsigned long long _collectionCount(OperationContext* opCtx,
const NamespaceString& nss,
bool callerHoldsGlobalLock) {
Collection* coll = nullptr;
@@ -637,32 +638,32 @@ unsigned long long _collectionCount(OperationContext* txn,
// If the global write lock is held, we must avoid using AutoGetCollectionForRead as it may lead
// to deadlock when waiting for a majority snapshot to be committed. See SERVER-24596.
if (callerHoldsGlobalLock) {
- Database* db = dbHolder().get(txn, nss.ns());
+ Database* db = dbHolder().get(opCtx, nss.ns());
if (db) {
coll = db->getCollection(nss);
}
} else {
- ctx.emplace(txn, nss);
+ ctx.emplace(opCtx, nss);
coll = ctx->getCollection();
}
- return coll ? coll->numRecords(txn) : 0;
+ return coll ? coll->numRecords(opCtx) : 0;
}
} // namespace
-long long State::postProcessCollectionNonAtomic(OperationContext* txn,
+long long State::postProcessCollectionNonAtomic(OperationContext* opCtx,
CurOp* curOp,
ProgressMeterHolder& pm,
bool callerHoldsGlobalLock) {
if (_config.outputOptions.finalNamespace == _config.tempNamespace)
- return _collectionCount(txn, _config.outputOptions.finalNamespace, callerHoldsGlobalLock);
+ return _collectionCount(opCtx, _config.outputOptions.finalNamespace, callerHoldsGlobalLock);
if (_config.outputOptions.outType == Config::REPLACE ||
- _collectionCount(txn, _config.outputOptions.finalNamespace, callerHoldsGlobalLock) == 0) {
- ScopedTransaction transaction(txn, MODE_X);
+ _collectionCount(opCtx, _config.outputOptions.finalNamespace, callerHoldsGlobalLock) == 0) {
+ ScopedTransaction transaction(opCtx, MODE_X);
// This must be global because we may write across different databases.
- Lock::GlobalWrite lock(txn->lockState());
+ Lock::GlobalWrite lock(opCtx->lockState());
// replace: just rename from temp to final collection name, dropping previous collection
_db.dropCollection(_config.outputOptions.finalNamespace.ns());
BSONObj info;
@@ -680,17 +681,19 @@ long long State::postProcessCollectionNonAtomic(OperationContext* txn,
} else if (_config.outputOptions.outType == Config::MERGE) {
// merge: upsert new docs into old collection
{
- const auto count = _collectionCount(txn, _config.tempNamespace, callerHoldsGlobalLock);
- stdx::lock_guard<Client> lk(*txn->getClient());
+ const auto count =
+ _collectionCount(opCtx, _config.tempNamespace, callerHoldsGlobalLock);
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->setMessage_inlock(
"m/r: merge post processing", "M/R Merge Post Processing Progress", count);
}
unique_ptr<DBClientCursor> cursor = _db.query(_config.tempNamespace.ns(), BSONObj());
while (cursor->more()) {
- ScopedTransaction scopedXact(txn, MODE_X);
- Lock::DBLock lock(txn->lockState(), _config.outputOptions.finalNamespace.db(), MODE_X);
+ ScopedTransaction scopedXact(opCtx, MODE_X);
+ Lock::DBLock lock(
+ opCtx->lockState(), _config.outputOptions.finalNamespace.db(), MODE_X);
BSONObj o = cursor->nextSafe();
- Helpers::upsert(txn, _config.outputOptions.finalNamespace.ns(), o);
+ Helpers::upsert(opCtx, _config.outputOptions.finalNamespace.ns(), o);
pm.hit();
}
_db.dropCollection(_config.tempNamespace.ns());
@@ -700,25 +703,26 @@ long long State::postProcessCollectionNonAtomic(OperationContext* txn,
BSONList values;
{
- const auto count = _collectionCount(txn, _config.tempNamespace, callerHoldsGlobalLock);
- stdx::lock_guard<Client> lk(*txn->getClient());
+ const auto count =
+ _collectionCount(opCtx, _config.tempNamespace, callerHoldsGlobalLock);
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->setMessage_inlock(
"m/r: reduce post processing", "M/R Reduce Post Processing Progress", count);
}
unique_ptr<DBClientCursor> cursor = _db.query(_config.tempNamespace.ns(), BSONObj());
while (cursor->more()) {
- ScopedTransaction transaction(txn, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_X);
// This must be global because we may write across different databases.
- Lock::GlobalWrite lock(txn->lockState());
+ Lock::GlobalWrite lock(opCtx->lockState());
BSONObj temp = cursor->nextSafe();
BSONObj old;
bool found;
{
- OldClientContext tx(txn, _config.outputOptions.finalNamespace.ns());
+ OldClientContext tx(opCtx, _config.outputOptions.finalNamespace.ns());
Collection* coll =
getCollectionOrUassert(tx.db(), _config.outputOptions.finalNamespace);
- found = Helpers::findOne(txn, coll, temp["_id"].wrap(), old, true);
+ found = Helpers::findOne(opCtx, coll, temp["_id"].wrap(), old, true);
}
if (found) {
@@ -726,18 +730,18 @@ long long State::postProcessCollectionNonAtomic(OperationContext* txn,
values.clear();
values.push_back(temp);
values.push_back(old);
- Helpers::upsert(txn,
+ Helpers::upsert(opCtx,
_config.outputOptions.finalNamespace.ns(),
_config.reducer->finalReduce(values, _config.finalizer.get()));
} else {
- Helpers::upsert(txn, _config.outputOptions.finalNamespace.ns(), temp);
+ Helpers::upsert(opCtx, _config.outputOptions.finalNamespace.ns(), temp);
}
pm.hit();
}
pm.finished();
}
- return _collectionCount(txn, _config.outputOptions.finalNamespace, callerHoldsGlobalLock);
+ return _collectionCount(opCtx, _config.outputOptions.finalNamespace, callerHoldsGlobalLock);
}
/**
@@ -747,11 +751,11 @@ void State::insert(const NamespaceString& nss, const BSONObj& o) {
verify(_onDisk);
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- OldClientWriteContext ctx(_txn, nss.ns());
- WriteUnitOfWork wuow(_txn);
+ OldClientWriteContext ctx(_opCtx, nss.ns());
+ WriteUnitOfWork wuow(_opCtx);
uassert(ErrorCodes::PrimarySteppedDown,
"no longer primary",
- repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(_txn, nss));
+ repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(_opCtx, nss));
Collection* coll = getCollectionOrUassert(ctx.db(), nss);
BSONObjBuilder b;
@@ -761,7 +765,7 @@ void State::insert(const NamespaceString& nss, const BSONObj& o) {
b.appendElements(o);
BSONObj bo = b.obj();
- StatusWith<BSONObj> res = fixDocumentForInsert(_txn->getServiceContext(), bo);
+ StatusWith<BSONObj> res = fixDocumentForInsert(_opCtx->getServiceContext(), bo);
uassertStatusOK(res.getStatus());
if (!res.getValue().isEmpty()) {
bo = res.getValue();
@@ -769,10 +773,10 @@ void State::insert(const NamespaceString& nss, const BSONObj& o) {
// TODO: Consider whether to pass OpDebug for stats tracking under SERVER-23261.
OpDebug* const nullOpDebug = nullptr;
- uassertStatusOK(coll->insertDocument(_txn, bo, nullOpDebug, true));
+ uassertStatusOK(coll->insertDocument(_opCtx, bo, nullOpDebug, true));
wuow.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_txn, "M/R insert", nss.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_opCtx, "M/R insert", nss.ns());
}
/**
@@ -782,12 +786,12 @@ void State::_insertToInc(BSONObj& o) {
verify(_onDisk);
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- OldClientWriteContext ctx(_txn, _config.incLong.ns());
- WriteUnitOfWork wuow(_txn);
+ OldClientWriteContext ctx(_opCtx, _config.incLong.ns());
+ WriteUnitOfWork wuow(_opCtx);
Collection* coll = getCollectionOrUassert(ctx.db(), _config.incLong);
- bool shouldReplicateWrites = _txn->writesAreReplicated();
- _txn->setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _txn, shouldReplicateWrites);
+ bool shouldReplicateWrites = _opCtx->writesAreReplicated();
+ _opCtx->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _opCtx, shouldReplicateWrites);
// The documents inserted into the incremental collection are of the form
// {"0": <key>, "1": <value>}, so we cannot call fixDocumentForInsert(o) here because the
@@ -804,14 +808,20 @@ void State::_insertToInc(BSONObj& o) {
// TODO: Consider whether to pass OpDebug for stats tracking under SERVER-23261.
OpDebug* const nullOpDebug = nullptr;
- uassertStatusOK(coll->insertDocument(_txn, o, nullOpDebug, true, false));
+ uassertStatusOK(coll->insertDocument(_opCtx, o, nullOpDebug, true, false));
wuow.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_txn, "M/R insertToInc", _config.incLong.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_opCtx, "M/R insertToInc", _config.incLong.ns());
}
-State::State(OperationContext* txn, const Config& c)
- : _config(c), _db(txn), _useIncremental(true), _txn(txn), _size(0), _dupCount(0), _numEmits(0) {
+State::State(OperationContext* opCtx, const Config& c)
+ : _config(c),
+ _db(opCtx),
+ _useIncremental(true),
+ _opCtx(opCtx),
+ _size(0),
+ _dupCount(0),
+ _numEmits(0) {
_temp.reset(new InMemory());
_onDisk = _config.outputOptions.outType != Config::INMEMORY;
}
@@ -849,9 +859,9 @@ void State::init() {
const string userToken =
AuthorizationSession::get(Client::getCurrent())->getAuthenticatedUserNamesToken();
_scope.reset(getGlobalScriptEngine()->newScopeForCurrentThread());
- _scope->registerOperation(_txn);
+ _scope->registerOperation(_opCtx);
_scope->setLocalDB(_config.dbname);
- _scope->loadStored(_txn, true);
+ _scope->loadStored(_opCtx, true);
if (!_config.scopeSetup.isEmpty())
_scope->init(&_config.scopeSetup);
@@ -1027,7 +1037,7 @@ BSONObj _nativeToTemp(const BSONObj& args, void* data) {
* After calling this method, the temp collection will be completed.
* If inline, the results will be in the in memory map
*/
-void State::finalReduce(OperationContext* txn, CurOp* curOp, ProgressMeterHolder& pm) {
+void State::finalReduce(OperationContext* opCtx, CurOp* curOp, ProgressMeterHolder& pm) {
if (_jsMode) {
// apply the reduce within JS
if (_onDisk) {
@@ -1066,12 +1076,12 @@ void State::finalReduce(OperationContext* txn, CurOp* curOp, ProgressMeterHolder
BSONObj sortKey = BSON("0" << 1);
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- OldClientWriteContext incCtx(_txn, _config.incLong.ns());
- WriteUnitOfWork wuow(_txn);
+ OldClientWriteContext incCtx(_opCtx, _config.incLong.ns());
+ WriteUnitOfWork wuow(_opCtx);
Collection* incColl = getCollectionOrUassert(incCtx.db(), _config.incLong);
bool foundIndex = false;
- IndexCatalog::IndexIterator ii = incColl->getIndexCatalog()->getIndexIterator(_txn, true);
+ IndexCatalog::IndexIterator ii = incColl->getIndexCatalog()->getIndexIterator(_opCtx, true);
// Iterate over incColl's indexes.
while (ii.more()) {
IndexDescriptor* currIndex = ii.next();
@@ -1085,28 +1095,28 @@ void State::finalReduce(OperationContext* txn, CurOp* curOp, ProgressMeterHolder
verify(foundIndex);
wuow.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_txn, "finalReduce", _config.incLong.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_opCtx, "finalReduce", _config.incLong.ns());
- unique_ptr<AutoGetCollectionForRead> ctx(new AutoGetCollectionForRead(_txn, _config.incLong));
+ unique_ptr<AutoGetCollectionForRead> ctx(new AutoGetCollectionForRead(_opCtx, _config.incLong));
BSONObj prev;
BSONList all;
{
const auto count = _db.count(_config.incLong.ns(), BSONObj(), QueryOption_SlaveOk);
- stdx::lock_guard<Client> lk(*_txn->getClient());
+ stdx::lock_guard<Client> lk(*_opCtx->getClient());
verify(pm ==
curOp->setMessage_inlock("m/r: (3/3) final reduce to collection",
"M/R: (3/3) Final Reduce Progress",
count));
}
- const ExtensionsCallbackReal extensionsCallback(_txn, &_config.incLong);
+ const ExtensionsCallbackReal extensionsCallback(_opCtx, &_config.incLong);
auto qr = stdx::make_unique<QueryRequest>(_config.incLong);
qr->setSort(sortKey);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx, std::move(qr), extensionsCallback);
verify(statusWithCQ.isOK());
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -1114,7 +1124,7 @@ void State::finalReduce(OperationContext* txn, CurOp* curOp, ProgressMeterHolder
invariant(coll);
auto statusWithPlanExecutor = getExecutor(
- _txn, coll, std::move(cq), PlanExecutor::YIELD_AUTO, QueryPlannerParams::NO_TABLE_SCAN);
+ _opCtx, coll, std::move(cq), PlanExecutor::YIELD_AUTO, QueryPlannerParams::NO_TABLE_SCAN);
verify(statusWithPlanExecutor.isOK());
unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
@@ -1130,7 +1140,7 @@ void State::finalReduce(OperationContext* txn, CurOp* curOp, ProgressMeterHolder
// object is same as previous, add to array
all.push_back(o);
if (pm->hits() % 100 == 0) {
- _txn->checkForInterrupt();
+ _opCtx->checkForInterrupt();
}
continue;
}
@@ -1142,7 +1152,7 @@ void State::finalReduce(OperationContext* txn, CurOp* curOp, ProgressMeterHolder
// reduce a finalize array
finalReduce(all);
- ctx.reset(new AutoGetCollectionForRead(_txn, _config.incLong));
+ ctx.reset(new AutoGetCollectionForRead(_opCtx, _config.incLong));
all.clear();
prev = o;
@@ -1152,7 +1162,7 @@ void State::finalReduce(OperationContext* txn, CurOp* curOp, ProgressMeterHolder
uasserted(34375, "Plan executor killed during mapReduce final reduce");
}
- _txn->checkForInterrupt();
+ _opCtx->checkForInterrupt();
}
uassert(34428,
@@ -1162,7 +1172,7 @@ void State::finalReduce(OperationContext* txn, CurOp* curOp, ProgressMeterHolder
ctx.reset();
// reduce and finalize last array
finalReduce(all);
- ctx.reset(new AutoGetCollectionForRead(_txn, _config.incLong));
+ ctx.reset(new AutoGetCollectionForRead(_opCtx, _config.incLong));
pm.finished();
}
@@ -1247,7 +1257,7 @@ int State::_add(InMemory* im, const BSONObj& a) {
void State::reduceAndSpillInMemoryStateIfNeeded() {
// Make sure no DB locks are held, because this method manages its own locking and
// write units of work.
- invariant(!_txn->lockState()->isLocked());
+ invariant(!_opCtx->lockState()->isLocked());
if (_jsMode) {
// try to reduce if it is beneficial
@@ -1362,7 +1372,7 @@ public:
addPrivilegesRequiredForMapReduce(this, dbname, cmdObj, out);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmd,
int,
@@ -1372,9 +1382,9 @@ public:
boost::optional<DisableDocumentValidation> maybeDisableValidation;
if (shouldBypassDocumentValidationForCommand(cmd))
- maybeDisableValidation.emplace(txn);
+ maybeDisableValidation.emplace(opCtx);
- auto client = txn->getClient();
+ auto client = opCtx->getClient();
if (client->isInDirectClient()) {
return appendCommandStatus(
@@ -1382,7 +1392,7 @@ public:
Status(ErrorCodes::IllegalOperation, "Cannot run mapReduce command from eval()"));
}
- auto curOp = CurOp::get(txn);
+ auto curOp = CurOp::get(opCtx);
const Config config(dbname, cmd);
@@ -1404,7 +1414,7 @@ public:
unique_ptr<RangePreserver> rangePreserver;
ScopedCollectionMetadata collMetadata;
{
- AutoGetCollectionForRead ctx(txn, config.nss);
+ AutoGetCollectionForRead ctx(opCtx, config.nss);
Collection* collection = ctx.getCollection();
if (collection) {
@@ -1413,19 +1423,19 @@ public:
// Get metadata before we check our version, to make sure it doesn't increment
// in the meantime. Need to do this in the same lock scope as the block.
- if (ShardingState::get(txn)->needCollectionMetadata(txn, config.nss.ns())) {
- collMetadata = CollectionShardingState::get(txn, config.nss)->getMetadata();
+ if (ShardingState::get(opCtx)->needCollectionMetadata(opCtx, config.nss.ns())) {
+ collMetadata = CollectionShardingState::get(opCtx, config.nss)->getMetadata();
}
}
// Ensure that the RangePreserver is freed under the lock. This is necessary since the
// RangePreserver's destructor unpins a ClientCursor, and access to the CursorManager must
// be done under the lock.
- ON_BLOCK_EXIT([txn, &config, &rangePreserver] {
+ ON_BLOCK_EXIT([opCtx, &config, &rangePreserver] {
if (rangePreserver) {
// Be sure not to use AutoGetCollectionForRead here, since that has side-effects
// other than lock acquisition.
- AutoGetCollection ctx(txn, config.nss, MODE_IS);
+ AutoGetCollection ctx(opCtx, config.nss, MODE_IS);
rangePreserver.reset();
}
});
@@ -1434,7 +1444,7 @@ public:
BSONObjBuilder countsBuilder;
BSONObjBuilder timingBuilder;
- State state(txn, config);
+ State state(opCtx, config);
if (!state.sourceExists()) {
return appendCommandStatus(
result,
@@ -1444,7 +1454,7 @@ public:
if (state.isOnDisk()) {
// this means that it will be doing a write operation, make sure we are on Master
// ideally this check should be in slaveOk(), but at that point config is not known
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor_UNSAFE(txn,
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor_UNSAFE(opCtx,
config.nss)) {
errmsg = "not master";
return false;
@@ -1460,7 +1470,7 @@ public:
bool showTotal = true;
if (state.config().filter.isEmpty()) {
const bool holdingGlobalLock = false;
- const auto count = _collectionCount(txn, config.nss, holdingGlobalLock);
+ const auto count = _collectionCount(opCtx, config.nss, holdingGlobalLock);
progressTotal =
(config.limit && (unsigned)config.limit < count) ? config.limit : count;
} else {
@@ -1469,7 +1479,7 @@ public:
progressTotal = 1;
}
- stdx::unique_lock<Client> lk(*txn->getClient());
+ stdx::unique_lock<Client> lk(*opCtx->getClient());
ProgressMeter& progress(curOp->setMessage_inlock(
"m/r: (1/3) emit phase", "M/R: (1/3) Emit Progress", progressTotal));
lk.unlock();
@@ -1488,18 +1498,18 @@ public:
// useful cursor.
// Need lock and context to use it
- unique_ptr<ScopedTransaction> scopedXact(new ScopedTransaction(txn, MODE_IS));
- unique_ptr<AutoGetDb> scopedAutoDb(new AutoGetDb(txn, config.nss.db(), MODE_S));
+ unique_ptr<ScopedTransaction> scopedXact(new ScopedTransaction(opCtx, MODE_IS));
+ unique_ptr<AutoGetDb> scopedAutoDb(new AutoGetDb(opCtx, config.nss.db(), MODE_S));
auto qr = stdx::make_unique<QueryRequest>(config.nss);
qr->setFilter(config.filter);
qr->setSort(config.sort);
qr->setCollation(config.collation);
- const ExtensionsCallbackReal extensionsCallback(txn, &config.nss);
+ const ExtensionsCallbackReal extensionsCallback(opCtx, &config.nss);
auto statusWithCQ =
- CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
+ CanonicalQuery::canonicalize(opCtx, std::move(qr), extensionsCallback);
if (!statusWithCQ.isOK()) {
uasserted(17238, "Can't canonicalize query " + config.filter.toString());
return 0;
@@ -1513,7 +1523,7 @@ public:
invariant(coll);
auto statusWithPlanExecutor =
- getExecutor(txn, coll, std::move(cq), PlanExecutor::YIELD_AUTO);
+ getExecutor(opCtx, coll, std::move(cq), PlanExecutor::YIELD_AUTO);
if (!statusWithPlanExecutor.isOK()) {
uasserted(17239,
"Can't get executor for query " + config.filter.toString());
@@ -1524,8 +1534,8 @@ public:
}
{
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
}
Timer mt;
@@ -1568,8 +1578,8 @@ public:
state.reduceAndSpillInMemoryStateIfNeeded();
- scopedXact.reset(new ScopedTransaction(txn, MODE_IS));
- scopedAutoDb.reset(new AutoGetDb(txn, config.nss.db(), MODE_S));
+ scopedXact.reset(new ScopedTransaction(opCtx, MODE_IS));
+ scopedAutoDb.reset(new AutoGetDb(opCtx, config.nss.db(), MODE_S));
if (!exec->restoreState()) {
return appendCommandStatus(
@@ -1581,7 +1591,7 @@ public:
reduceTime += t.micros();
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
}
pm.hit();
@@ -1608,7 +1618,7 @@ public:
Collection* coll = scopedAutoDb->getDb()->getCollection(config.nss);
invariant(coll); // 'exec' hasn't been killed, so collection must be alive.
- coll->infoCache()->notifyOfQuery(txn, stats.indexesUsed);
+ coll->infoCache()->notifyOfQuery(opCtx, stats.indexesUsed);
if (curOp->shouldDBProfile()) {
BSONObjBuilder execStatsBob;
@@ -1618,7 +1628,7 @@ public:
}
pm.finished();
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
// update counters
countsBuilder.appendNumber("input", numInputs);
@@ -1630,7 +1640,7 @@ public:
timingBuilder.append("emitLoop", t.millis());
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->setMessage_inlock("m/r: (2/3) final reduce in memory",
"M/R: (2/3) Final In-Memory Reduce Progress");
}
@@ -1641,13 +1651,13 @@ public:
// if not inline: dump the in memory map to inc collection, all data is on disk
state.dumpToInc();
// final reduce
- state.finalReduce(txn, curOp, pm);
+ state.finalReduce(opCtx, curOp, pm);
reduceTime += rt.micros();
// Ensure the profile shows the source namespace. If the output was not inline, the
// active namespace will be the temporary collection we inserted into.
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->setNS_inlock(config.nss.ns());
}
@@ -1655,7 +1665,7 @@ public:
timingBuilder.appendNumber("reduceTime", reduceTime / 1000);
timingBuilder.append("mode", state.jsMode() ? "js" : "mixed");
- long long finalCount = state.postProcessCollection(txn, curOp, pm);
+ long long finalCount = state.postProcessCollection(opCtx, curOp, pm);
state.appendResults(result);
timingBuilder.appendNumber("total", t.millis());
@@ -1718,7 +1728,7 @@ public:
actions.addAction(ActionType::internal);
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -1734,7 +1744,7 @@ public:
boost::optional<DisableDocumentValidation> maybeDisableValidation;
if (shouldBypassDocumentValidationForCommand(cmdObj))
- maybeDisableValidation.emplace(txn);
+ maybeDisableValidation.emplace(opCtx);
ShardedConnectionInfo::addHook();
@@ -1754,10 +1764,10 @@ public:
inputNS = NamespaceString(dbname, shardedOutputCollection).ns();
}
- CurOp* curOp = CurOp::get(txn);
+ CurOp* curOp = CurOp::get(opCtx);
Config config(dbname, cmdObj.firstElement().embeddedObjectUserCheck());
- State state(txn, config);
+ State state(opCtx, config);
state.init();
// no need for incremental collection because records are already sorted
@@ -1767,7 +1777,7 @@ public:
BSONObj shardCounts = cmdObj["shardCounts"].embeddedObjectUserCheck();
BSONObj counts = cmdObj["counts"].embeddedObjectUserCheck();
- stdx::unique_lock<Client> lk(*txn->getClient());
+ stdx::unique_lock<Client> lk(*opCtx->getClient());
ProgressMeterHolder pm(curOp->setMessage_inlock("m/r: merge sort and reduce",
"M/R Merge Sort and Reduce Progress"));
lk.unlock();
@@ -1781,7 +1791,7 @@ public:
std::string server = e.fieldName();
servers.insert(server);
- uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, server));
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, server));
}
}
@@ -1801,7 +1811,7 @@ public:
result.append("result", config.outputOptions.collectionName);
}
- auto scopedDbStatus = ScopedShardDatabase::getExisting(txn, dbname);
+ auto scopedDbStatus = ScopedShardDatabase::getExisting(opCtx, dbname);
if (!scopedDbStatus.isOK()) {
return appendCommandStatus(result, scopedDbStatus.getStatus());
}
@@ -1812,11 +1822,11 @@ public:
if (confOut->isSharded(config.outputOptions.finalNamespace.ns())) {
shared_ptr<ChunkManager> cm =
- confOut->getChunkManager(txn, config.outputOptions.finalNamespace.ns());
+ confOut->getChunkManager(opCtx, config.outputOptions.finalNamespace.ns());
// Fetch result from other shards 1 chunk at a time. It would be better to do just one
// big $or query, but then the sorting would not be efficient.
- const string shardName = ShardingState::get(txn)->getShardName();
+ const string shardName = ShardingState::get(opCtx)->getShardName();
const ChunkMap& chunkMap = cm->getChunkMap();
for (ChunkMap::const_iterator it = chunkMap.begin(); it != chunkMap.end(); ++it) {
@@ -1846,7 +1856,7 @@ public:
BSONObj sortKey = BSON("_id" << 1);
ParallelSortClusteredCursor cursor(
servers, inputNS, Query(query).sort(sortKey), QueryOption_NoCursorTimeout);
- cursor.init(txn);
+ cursor.init(opCtx);
int chunkSize = 0;
while (cursor.more() || !values.empty()) {
@@ -1890,7 +1900,7 @@ public:
result.append("chunkSizes", chunkSizes.arr());
- long long outputCount = state.postProcessCollection(txn, curOp, pm);
+ long long outputCount = state.postProcessCollection(opCtx, curOp, pm);
state.appendResults(result);
BSONObjBuilder countsB(32);
diff --git a/src/mongo/db/commands/mr.h b/src/mongo/db/commands/mr.h
index aa729f49e7f..15baf5e8fb9 100644
--- a/src/mongo/db/commands/mr.h
+++ b/src/mongo/db/commands/mr.h
@@ -260,9 +260,9 @@ public:
class State {
public:
/**
- * txn must outlive this State.
+ * opCtx must outlive this State.
*/
- State(OperationContext* txn, const Config& c);
+ State(OperationContext* opCtx, const Config& c);
~State();
void init();
@@ -305,7 +305,7 @@ public:
void finalReduce(BSONList& values);
- void finalReduce(OperationContext* txn, CurOp* op, ProgressMeterHolder& pm);
+ void finalReduce(OperationContext* opCtx, CurOp* op, ProgressMeterHolder& pm);
// ------- cleanup/data positioning ----------
@@ -317,8 +317,8 @@ public:
/**
@return number objects in collection
*/
- long long postProcessCollection(OperationContext* txn, CurOp* op, ProgressMeterHolder& pm);
- long long postProcessCollectionNonAtomic(OperationContext* txn,
+ long long postProcessCollection(OperationContext* opCtx, CurOp* op, ProgressMeterHolder& pm);
+ long long postProcessCollectionNonAtomic(OperationContext* opCtx,
CurOp* op,
ProgressMeterHolder& pm,
bool callerHoldsGlobalLock);
@@ -388,7 +388,7 @@ protected:
*/
int _add(InMemory* im, const BSONObj& a);
- OperationContext* _txn;
+ OperationContext* _opCtx;
std::unique_ptr<Scope> _scope;
bool _onDisk; // if the end result of this map reduce is disk or not
diff --git a/src/mongo/db/commands/oplog_note.cpp b/src/mongo/db/commands/oplog_note.cpp
index 35062313941..39d3d175ff0 100644
--- a/src/mongo/db/commands/oplog_note.cpp
+++ b/src/mongo/db/commands/oplog_note.cpp
@@ -69,7 +69,7 @@ public:
}
return Status::OK();
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -87,11 +87,11 @@ public:
return appendCommandStatus(result, status);
}
- ScopedTransaction scopedXact(txn, MODE_X);
- Lock::GlobalWrite globalWrite(txn->lockState());
+ ScopedTransaction scopedXact(opCtx, MODE_X);
+ Lock::GlobalWrite globalWrite(opCtx->lockState());
- WriteUnitOfWork wuow(txn);
- getGlobalServiceContext()->getOpObserver()->onOpMessage(txn, dataElement.Obj());
+ WriteUnitOfWork wuow(opCtx);
+ getGlobalServiceContext()->getOpObserver()->onOpMessage(opCtx, dataElement.Obj());
wuow.commit();
return true;
}
diff --git a/src/mongo/db/commands/parallel_collection_scan.cpp b/src/mongo/db/commands/parallel_collection_scan.cpp
index 41689016961..df783b46062 100644
--- a/src/mongo/db/commands/parallel_collection_scan.cpp
+++ b/src/mongo/db/commands/parallel_collection_scan.cpp
@@ -85,7 +85,7 @@ public:
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -93,7 +93,7 @@ public:
BSONObjBuilder& result) {
const NamespaceString ns(parseNsCollectionRequired(dbname, cmdObj));
- AutoGetCollectionForRead ctx(txn, ns);
+ AutoGetCollectionForRead ctx(opCtx, ns);
Collection* collection = ctx.getCollection();
if (!collection)
@@ -111,7 +111,7 @@ public:
<< " was: "
<< numCursors));
- auto iterators = collection->getManyCursors(txn);
+ auto iterators = collection->getManyCursors(opCtx);
if (iterators.size() < numCursors) {
numCursors = iterators.size();
}
@@ -120,11 +120,11 @@ public:
for (size_t i = 0; i < numCursors; i++) {
unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
unique_ptr<MultiIteratorStage> mis =
- make_unique<MultiIteratorStage>(txn, ws.get(), collection);
+ make_unique<MultiIteratorStage>(opCtx, ws.get(), collection);
// Takes ownership of 'ws' and 'mis'.
auto statusWithPlanExecutor = PlanExecutor::make(
- txn, std::move(ws), std::move(mis), collection, PlanExecutor::YIELD_AUTO);
+ opCtx, std::move(ws), std::move(mis), collection, PlanExecutor::YIELD_AUTO);
invariant(statusWithPlanExecutor.isOK());
execs.push_back(std::move(statusWithPlanExecutor.getValue()));
}
@@ -152,9 +152,9 @@ public:
auto pinnedCursor = collection->getCursorManager()->registerCursor(
{exec.release(),
ns.ns(),
- txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot()});
+ opCtx->recoveryUnit()->isReadingFromMajorityCommittedSnapshot()});
pinnedCursor.getCursor()->setLeftoverMaxTimeMicros(
- txn->getRemainingMaxTimeMicros());
+ opCtx->getRemainingMaxTimeMicros());
BSONObjBuilder threadResult;
appendCursorResponseObject(
diff --git a/src/mongo/db/commands/parameters.cpp b/src/mongo/db/commands/parameters.cpp
index a3c252d050a..f2e10dac2b5 100644
--- a/src/mongo/db/commands/parameters.cpp
+++ b/src/mongo/db/commands/parameters.cpp
@@ -88,7 +88,7 @@ public:
appendParameterNames(help);
help << "{ getParameter:'*' } to get everything\n";
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -101,7 +101,7 @@ public:
const ServerParameter::Map& m = ServerParameterSet::getGlobal()->getMap();
for (ServerParameter::Map::const_iterator i = m.begin(); i != m.end(); ++i) {
if (all || cmdObj.hasElement(i->first.c_str())) {
- i->second->append(txn, result, i->second->name());
+ i->second->append(opCtx, result, i->second->name());
}
}
@@ -137,7 +137,7 @@ public:
help << "{ setParameter:1, <param>:<value> }\n";
appendParameterNames(help);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -213,7 +213,7 @@ public:
}
if (numSet == 0) {
- foundParameter->second->append(txn, result, "was");
+ foundParameter->second->append(opCtx, result, "was");
}
Status status = foundParameter->second->set(parameter);
@@ -247,7 +247,7 @@ class LogLevelSetting : public ServerParameter {
public:
LogLevelSetting() : ServerParameter(ServerParameterSet::getGlobal(), "logLevel") {}
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
+ virtual void append(OperationContext* opCtx, BSONObjBuilder& b, const std::string& name) {
b << name << globalLogDomain()->getMinimumLogSeverity().toInt();
}
@@ -290,7 +290,7 @@ public:
LogComponentVerbositySetting()
: ServerParameter(ServerParameterSet::getGlobal(), "logComponentVerbosity") {}
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
+ virtual void append(OperationContext* opCtx, BSONObjBuilder& b, const std::string& name) {
BSONObj currentSettings;
_get(&currentSettings);
b << name << currentSettings;
@@ -459,7 +459,7 @@ public:
}
}
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
+ virtual void append(OperationContext* opCtx, BSONObjBuilder& b, const std::string& name) {
b << name << sslModeStr();
}
@@ -530,7 +530,7 @@ public:
}
}
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
+ virtual void append(OperationContext* opCtx, BSONObjBuilder& b, const std::string& name) {
b << name << clusterAuthModeStr();
}
@@ -607,7 +607,7 @@ public:
AutomationServiceDescriptor()
: ServerParameter(ServerParameterSet::getGlobal(), kName.toString(), true, true) {}
- virtual void append(OperationContext* txn,
+ virtual void append(OperationContext* opCtx,
BSONObjBuilder& builder,
const std::string& name) override {
const stdx::lock_guard<stdx::mutex> lock(_mutex);
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index 0f07e38c830..4fcfb97f574 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -88,7 +88,7 @@ namespace {
* namespace used in the returned cursor. In the case of views, this can be different from that
* in 'request'.
*/
-bool handleCursorCommand(OperationContext* txn,
+bool handleCursorCommand(OperationContext* opCtx,
const string& nsForCursor,
ClientCursor* cursor,
PlanExecutor* exec,
@@ -150,16 +150,16 @@ bool handleCursorCommand(OperationContext* txn,
if (cursor) {
// If a time limit was set on the pipeline, remaining time is "rolled over" to the
// cursor (for use by future getmore ops).
- cursor->setLeftoverMaxTimeMicros(txn->getRemainingMaxTimeMicros());
+ cursor->setLeftoverMaxTimeMicros(opCtx->getRemainingMaxTimeMicros());
- CurOp::get(txn)->debug().cursorid = cursor->cursorid();
+ CurOp::get(opCtx)->debug().cursorid = cursor->cursorid();
// Cursor needs to be in a saved state while we yield locks for getmore. State
// will be restored in getMore().
exec->saveState();
exec->detachFromOperationContext();
} else {
- CurOp::get(txn)->debug().cursorExhausted = true;
+ CurOp::get(opCtx)->debug().cursorExhausted = true;
}
const long long cursorId = cursor ? cursor->cursorid() : 0LL;
@@ -169,12 +169,12 @@ bool handleCursorCommand(OperationContext* txn,
}
StatusWith<StringMap<ExpressionContext::ResolvedNamespace>> resolveInvolvedNamespaces(
- OperationContext* txn, const AggregationRequest& request) {
+ OperationContext* opCtx, const AggregationRequest& request) {
// We intentionally do not drop and reacquire our DB lock after resolving the view definition in
// order to prevent the definition for any view namespaces we've already resolved from changing.
// This is necessary to prevent a cycle from being formed among the view definitions cached in
// 'resolvedNamespaces' because we won't re-resolve a view namespace we've already encountered.
- AutoGetDb autoDb(txn, request.getNamespaceString().db(), MODE_IS);
+ AutoGetDb autoDb(opCtx, request.getNamespaceString().db(), MODE_IS);
Database* const db = autoDb.getDb();
ViewCatalog* viewCatalog = db ? db->getViewCatalog() : nullptr;
@@ -199,9 +199,9 @@ StatusWith<StringMap<ExpressionContext::ResolvedNamespace>> resolveInvolvedNames
// pipeline because 'involvedNs' doesn't refer to a view namespace in our consistent
// snapshot of the view catalog.
resolvedNamespaces[involvedNs.coll()] = {involvedNs, std::vector<BSONObj>{}};
- } else if (viewCatalog->lookup(txn, involvedNs.ns())) {
+ } else if (viewCatalog->lookup(opCtx, involvedNs.ns())) {
// If 'involvedNs' refers to a view namespace, then we resolve its definition.
- auto resolvedView = viewCatalog->resolveView(txn, involvedNs);
+ auto resolvedView = viewCatalog->resolveView(opCtx, involvedNs);
if (!resolvedView.isOK()) {
return {ErrorCodes::FailedToParse,
str::stream() << "Failed to resolve view '" << involvedNs.ns() << "': "
@@ -265,7 +265,7 @@ boost::intrusive_ptr<Pipeline> reparsePipeline(
* Returns Status::OK if each view namespace in 'pipeline' has a default collator equivalent to
* 'collator'. Otherwise, returns ErrorCodes::OptionNotSupportedOnView.
*/
-Status collatorCompatibleWithPipeline(OperationContext* txn,
+Status collatorCompatibleWithPipeline(OperationContext* opCtx,
Database* db,
const CollatorInterface* collator,
const intrusive_ptr<Pipeline> pipeline) {
@@ -277,7 +277,7 @@ Status collatorCompatibleWithPipeline(OperationContext* txn,
continue;
}
- auto view = db->getViewCatalog()->lookup(txn, potentialViewNs.ns());
+ auto view = db->getViewCatalog()->lookup(opCtx, potentialViewNs.ns());
if (!view) {
continue;
}
@@ -339,7 +339,7 @@ public:
return AuthorizationSession::get(client)->checkAuthForAggregate(nss, cmdObj);
}
- bool runParsed(OperationContext* txn,
+ bool runParsed(OperationContext* opCtx,
const NamespaceString& origNss,
const AggregationRequest& request,
BSONObj& cmdObj,
@@ -351,14 +351,14 @@ public:
// Parse the user-specified collation, if any.
std::unique_ptr<CollatorInterface> userSpecifiedCollator = request.getCollation().isEmpty()
? nullptr
- : uassertStatusOK(CollatorFactoryInterface::get(txn->getServiceContext())
+ : uassertStatusOK(CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(request.getCollation()));
boost::optional<ClientCursorPin> pin; // either this OR the exec will be non-null
unique_ptr<PlanExecutor> exec;
boost::intrusive_ptr<ExpressionContext> expCtx;
boost::intrusive_ptr<Pipeline> pipeline;
- auto curOp = CurOp::get(txn);
+ auto curOp = CurOp::get(opCtx);
{
// This will throw if the sharding version for this connection is out of date. If the
// namespace is a view, the lock will be released before re-running the aggregation.
@@ -367,7 +367,7 @@ public:
// same sharding version that we synchronize on here. This is also why we always need to
// create a ClientCursor even when we aren't outputting to a cursor. See the comment on
// ShardFilterStage for more details.
- AutoGetCollectionOrViewForRead ctx(txn, nss);
+ AutoGetCollectionOrViewForRead ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
// If this is a view, resolve it by finding the underlying collection and stitching view
@@ -390,7 +390,7 @@ public:
}
auto viewDefinition =
- ViewShardingCheck::getResolvedViewIfSharded(txn, ctx.getDb(), ctx.getView());
+ ViewShardingCheck::getResolvedViewIfSharded(opCtx, ctx.getDb(), ctx.getView());
if (!viewDefinition.isOK()) {
return appendCommandStatus(result, viewDefinition.getStatus());
}
@@ -400,7 +400,7 @@ public:
return false;
}
- auto resolvedView = ctx.getDb()->getViewCatalog()->resolveView(txn, nss);
+ auto resolvedView = ctx.getDb()->getViewCatalog()->resolveView(opCtx, nss);
if (!resolvedView.isOK()) {
return appendCommandStatus(result, resolvedView.getStatus());
}
@@ -425,11 +425,11 @@ public:
newRequest.getValue().setCollation(collationSpec);
bool status = runParsed(
- txn, origNss, newRequest.getValue(), newCmd.getValue(), errmsg, result);
+ opCtx, origNss, newRequest.getValue(), newCmd.getValue(), errmsg, result);
{
// Set the namespace of the curop back to the view namespace so ctx records
// stats on this view namespace on destruction.
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->setNS_inlock(nss.ns());
}
return status;
@@ -449,10 +449,10 @@ public:
}
expCtx.reset(
- new ExpressionContext(txn,
+ new ExpressionContext(opCtx,
request,
std::move(collatorToUse),
- uassertStatusOK(resolveInvolvedNamespaces(txn, request))));
+ uassertStatusOK(resolveInvolvedNamespaces(opCtx, request))));
expCtx->tempDir = storageGlobalParams.dbpath + "/_tmp";
// Parse the pipeline.
@@ -465,7 +465,7 @@ public:
// Check that the view's collation matches the collation of any views involved
// in the pipeline.
auto pipelineCollationStatus =
- collatorCompatibleWithPipeline(txn, ctx.getDb(), expCtx->getCollator(), pipeline);
+ collatorCompatibleWithPipeline(opCtx, ctx.getDb(), expCtx->getCollator(), pipeline);
if (!pipelineCollationStatus.isOK()) {
return appendCommandStatus(result, pipelineCollationStatus);
}
@@ -488,19 +488,22 @@ public:
// ('ws') and the PipelineProxyStage ('proxy') will be owned by the created
// PlanExecutor.
auto ws = make_unique<WorkingSet>();
- auto proxy = make_unique<PipelineProxyStage>(txn, pipeline, ws.get());
+ auto proxy = make_unique<PipelineProxyStage>(opCtx, pipeline, ws.get());
auto statusWithPlanExecutor = (NULL == collection)
? PlanExecutor::make(
- txn, std::move(ws), std::move(proxy), nss.ns(), PlanExecutor::YIELD_MANUAL)
- : PlanExecutor::make(
- txn, std::move(ws), std::move(proxy), collection, PlanExecutor::YIELD_MANUAL);
+ opCtx, std::move(ws), std::move(proxy), nss.ns(), PlanExecutor::YIELD_MANUAL)
+ : PlanExecutor::make(opCtx,
+ std::move(ws),
+ std::move(proxy),
+ collection,
+ PlanExecutor::YIELD_MANUAL);
invariant(statusWithPlanExecutor.isOK());
exec = std::move(statusWithPlanExecutor.getValue());
{
auto planSummary = Explain::getPlanSummary(exec.get());
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->setPlanSummary_inlock(std::move(planSummary));
}
@@ -509,7 +512,7 @@ public:
pin.emplace(collection->getCursorManager()->registerCursor(
{exec.release(),
nss.ns(),
- txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(),
+ opCtx->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(),
0,
cmdObj.getOwned(),
isAggCursor}));
@@ -533,7 +536,7 @@ public:
result << "stages" << Value(pipeline->writeExplainOps());
} else {
// Cursor must be specified, if explain is not.
- keepCursor = handleCursorCommand(txn,
+ keepCursor = handleCursorCommand(opCtx,
origNss.ns(),
pin ? pin->getCursor() : nullptr,
pin ? pin->getCursor()->getExecutor() : exec.get(),
@@ -556,8 +559,8 @@ public:
// AutoGetCollectionForRead. AutoGetCollectionForRead will throw if the
// sharding version is out of date, and we don't care if the sharding version
// has changed.
- Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_IS);
- Lock::CollectionLock collLock(txn->lockState(), nss.ns(), MODE_IS);
+ Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_IS);
+ Lock::CollectionLock collLock(opCtx->lockState(), nss.ns(), MODE_IS);
if (keepCursor) {
pin->release();
} else {
@@ -567,8 +570,8 @@ public:
} catch (...) {
// On our way out of scope, we clean up our ClientCursorPin if needed.
if (pin) {
- Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_IS);
- Lock::CollectionLock collLock(txn->lockState(), nss.ns(), MODE_IS);
+ Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_IS);
+ Lock::CollectionLock collLock(opCtx->lockState(), nss.ns(), MODE_IS);
pin->deleteUnderlying();
}
throw;
@@ -577,7 +580,7 @@ public:
return appendCommandStatus(result, Status::OK());
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& db,
BSONObj& cmdObj,
int options,
@@ -607,7 +610,7 @@ public:
"http://dochub.mongodb.org/core/3.4-feature-compatibility."));
}
- return runParsed(txn, nss, request.getValue(), cmdObj, errmsg, result);
+ return runParsed(opCtx, nss, request.getValue(), cmdObj, errmsg, result);
}
};
diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp
index 67b244d6bb3..4cd4ca26992 100644
--- a/src/mongo/db/commands/plan_cache_commands.cpp
+++ b/src/mongo/db/commands/plan_cache_commands.cpp
@@ -58,7 +58,7 @@ using namespace mongo;
/**
* Retrieves a collection's plan cache from the database.
*/
-static Status getPlanCache(OperationContext* txn,
+static Status getPlanCache(OperationContext* opCtx,
Collection* collection,
const string& ns,
PlanCache** planCacheOut) {
@@ -110,14 +110,14 @@ PlanCacheCommand::PlanCacheCommand(const string& name,
ActionType actionType)
: Command(name), helpText(helpText), actionType(actionType) {}
-bool PlanCacheCommand::run(OperationContext* txn,
+bool PlanCacheCommand::run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
- Status status = runPlanCacheCommand(txn, nss.ns(), cmdObj, &result);
+ Status status = runPlanCacheCommand(opCtx, nss.ns(), cmdObj, &result);
return appendCommandStatus(result, status);
}
@@ -152,7 +152,7 @@ Status PlanCacheCommand::checkAuthForCommand(Client* client,
}
// static
-StatusWith<unique_ptr<CanonicalQuery>> PlanCacheCommand::canonicalize(OperationContext* txn,
+StatusWith<unique_ptr<CanonicalQuery>> PlanCacheCommand::canonicalize(OperationContext* opCtx,
const string& ns,
const BSONObj& cmdObj) {
// query - required
@@ -208,8 +208,8 @@ StatusWith<unique_ptr<CanonicalQuery>> PlanCacheCommand::canonicalize(OperationC
qr->setSort(sortObj);
qr->setProj(projObj);
qr->setCollation(collationObj);
- const ExtensionsCallbackReal extensionsCallback(txn, &nss);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
+ const ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx, std::move(qr), extensionsCallback);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
@@ -222,15 +222,15 @@ PlanCacheListQueryShapes::PlanCacheListQueryShapes()
"Displays all query shapes in a collection.",
ActionType::planCacheRead) {}
-Status PlanCacheListQueryShapes::runPlanCacheCommand(OperationContext* txn,
+Status PlanCacheListQueryShapes::runPlanCacheCommand(OperationContext* opCtx,
const string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob) {
// This is a read lock. The query cache is owned by the collection.
- AutoGetCollectionForRead ctx(txn, NamespaceString(ns));
+ AutoGetCollectionForRead ctx(opCtx, NamespaceString(ns));
PlanCache* planCache;
- Status status = getPlanCache(txn, ctx.getCollection(), ns, &planCache);
+ Status status = getPlanCache(opCtx, ctx.getCollection(), ns, &planCache);
if (!status.isOK()) {
// No collection - return results with empty shapes array.
BSONArrayBuilder arrayBuilder(bob->subarrayStart("shapes"));
@@ -274,24 +274,24 @@ PlanCacheClear::PlanCacheClear()
"Drops one or all cached queries in a collection.",
ActionType::planCacheWrite) {}
-Status PlanCacheClear::runPlanCacheCommand(OperationContext* txn,
+Status PlanCacheClear::runPlanCacheCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob) {
// This is a read lock. The query cache is owned by the collection.
- AutoGetCollectionForRead ctx(txn, NamespaceString(ns));
+ AutoGetCollectionForRead ctx(opCtx, NamespaceString(ns));
PlanCache* planCache;
- Status status = getPlanCache(txn, ctx.getCollection(), ns, &planCache);
+ Status status = getPlanCache(opCtx, ctx.getCollection(), ns, &planCache);
if (!status.isOK()) {
// No collection - nothing to do. Return OK status.
return Status::OK();
}
- return clear(txn, planCache, ns, cmdObj);
+ return clear(opCtx, planCache, ns, cmdObj);
}
// static
-Status PlanCacheClear::clear(OperationContext* txn,
+Status PlanCacheClear::clear(OperationContext* opCtx,
PlanCache* planCache,
const string& ns,
const BSONObj& cmdObj) {
@@ -302,7 +302,7 @@ Status PlanCacheClear::clear(OperationContext* txn,
// - clear plans for single query shape when a query shape is described in the
// command arguments.
if (cmdObj.hasField("query")) {
- auto statusWithCQ = PlanCacheCommand::canonicalize(txn, ns, cmdObj);
+ auto statusWithCQ = PlanCacheCommand::canonicalize(opCtx, ns, cmdObj);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
@@ -351,30 +351,30 @@ PlanCacheListPlans::PlanCacheListPlans()
"Displays the cached plans for a query shape.",
ActionType::planCacheRead) {}
-Status PlanCacheListPlans::runPlanCacheCommand(OperationContext* txn,
+Status PlanCacheListPlans::runPlanCacheCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob) {
- AutoGetCollectionForRead ctx(txn, NamespaceString(ns));
+ AutoGetCollectionForRead ctx(opCtx, NamespaceString(ns));
PlanCache* planCache;
- Status status = getPlanCache(txn, ctx.getCollection(), ns, &planCache);
+ Status status = getPlanCache(opCtx, ctx.getCollection(), ns, &planCache);
if (!status.isOK()) {
// No collection - return empty plans array.
BSONArrayBuilder plansBuilder(bob->subarrayStart("plans"));
plansBuilder.doneFast();
return Status::OK();
}
- return list(txn, *planCache, ns, cmdObj, bob);
+ return list(opCtx, *planCache, ns, cmdObj, bob);
}
// static
-Status PlanCacheListPlans::list(OperationContext* txn,
+Status PlanCacheListPlans::list(OperationContext* opCtx,
const PlanCache& planCache,
const std::string& ns,
const BSONObj& cmdObj,
BSONObjBuilder* bob) {
- auto statusWithCQ = canonicalize(txn, ns, cmdObj);
+ auto statusWithCQ = canonicalize(opCtx, ns, cmdObj);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
diff --git a/src/mongo/db/commands/plan_cache_commands.h b/src/mongo/db/commands/plan_cache_commands.h
index 1b6afaf2171..881bf475433 100644
--- a/src/mongo/db/commands/plan_cache_commands.h
+++ b/src/mongo/db/commands/plan_cache_commands.h
@@ -57,7 +57,7 @@ public:
* implement plan cache command functionality.
*/
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -86,7 +86,7 @@ public:
* Should contain just enough logic to invoke run*Command() function
* in plan_cache.h
*/
- virtual Status runPlanCacheCommand(OperationContext* txn,
+ virtual Status runPlanCacheCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob) = 0;
@@ -94,7 +94,7 @@ public:
/**
* Validatess query shape from command object and returns canonical query.
*/
- static StatusWith<std::unique_ptr<CanonicalQuery>> canonicalize(OperationContext* txn,
+ static StatusWith<std::unique_ptr<CanonicalQuery>> canonicalize(OperationContext* opCtx,
const std::string& ns,
const BSONObj& cmdObj);
@@ -112,7 +112,7 @@ private:
class PlanCacheListQueryShapes : public PlanCacheCommand {
public:
PlanCacheListQueryShapes();
- virtual Status runPlanCacheCommand(OperationContext* txn,
+ virtual Status runPlanCacheCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob);
@@ -138,7 +138,7 @@ public:
class PlanCacheClear : public PlanCacheCommand {
public:
PlanCacheClear();
- virtual Status runPlanCacheCommand(OperationContext* txn,
+ virtual Status runPlanCacheCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob);
@@ -147,7 +147,7 @@ public:
* Clears collection's plan cache.
* If query shape is provided, clears plans for that single query shape only.
*/
- static Status clear(OperationContext* txn,
+ static Status clear(OperationContext* opCtx,
PlanCache* planCache,
const std::string& ns,
const BSONObj& cmdObj);
@@ -167,7 +167,7 @@ public:
class PlanCacheListPlans : public PlanCacheCommand {
public:
PlanCacheListPlans();
- virtual Status runPlanCacheCommand(OperationContext* txn,
+ virtual Status runPlanCacheCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob);
@@ -175,7 +175,7 @@ public:
/**
* Displays the cached plans for a query shape.
*/
- static Status list(OperationContext* txn,
+ static Status list(OperationContext* opCtx,
const PlanCache& planCache,
const std::string& ns,
const BSONObj& cmdObj,
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index 4975a557443..1ec3611ccdf 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -133,7 +133,7 @@ TEST(PlanCacheCommandsTest, planCacheListQueryShapesEmpty) {
TEST(PlanCacheCommandsTest, planCacheListQueryShapesOneKey) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Create a canonical query
auto qr = stdx::make_unique<QueryRequest>(nss);
@@ -142,7 +142,7 @@ TEST(PlanCacheCommandsTest, planCacheListQueryShapesOneKey) {
qr->setProj(fromjson("{_id: 0}"));
qr->setCollation(fromjson("{locale: 'mock_reverse_string'}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -168,13 +168,13 @@ TEST(PlanCacheCommandsTest, planCacheListQueryShapesOneKey) {
TEST(PlanCacheCommandsTest, planCacheClearAllShapes) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Create a canonical query
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson("{a: 1}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -189,7 +189,7 @@ TEST(PlanCacheCommandsTest, planCacheClearAllShapes) {
ASSERT_EQUALS(getShapes(planCache).size(), 1U);
// Clear cache and confirm number of keys afterwards.
- ASSERT_OK(PlanCacheClear::clear(txn.get(), &planCache, nss.ns(), BSONObj()));
+ ASSERT_OK(PlanCacheClear::clear(opCtx.get(), &planCache, nss.ns(), BSONObj()));
ASSERT_EQUALS(getShapes(planCache).size(), 0U);
}
@@ -202,68 +202,69 @@ TEST(PlanCacheCommandsTest, Canonicalize) {
// Invalid parameters
PlanCache planCache;
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Missing query field
- ASSERT_NOT_OK(PlanCacheCommand::canonicalize(txn.get(), nss.ns(), fromjson("{}")).getStatus());
+ ASSERT_NOT_OK(
+ PlanCacheCommand::canonicalize(opCtx.get(), nss.ns(), fromjson("{}")).getStatus());
// Query needs to be an object
ASSERT_NOT_OK(
- PlanCacheCommand::canonicalize(txn.get(), nss.ns(), fromjson("{query: 1}")).getStatus());
+ PlanCacheCommand::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: 1}")).getStatus());
// Sort needs to be an object
ASSERT_NOT_OK(
- PlanCacheCommand::canonicalize(txn.get(), nss.ns(), fromjson("{query: {}, sort: 1}"))
+ PlanCacheCommand::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {}, sort: 1}"))
.getStatus());
// Projection needs to be an object.
- ASSERT_NOT_OK(
- PlanCacheCommand::canonicalize(txn.get(), nss.ns(), fromjson("{query: {}, projection: 1}"))
- .getStatus());
+ ASSERT_NOT_OK(PlanCacheCommand::canonicalize(
+ opCtx.get(), nss.ns(), fromjson("{query: {}, projection: 1}"))
+ .getStatus());
// Collation needs to be an object.
ASSERT_NOT_OK(
- PlanCacheCommand::canonicalize(txn.get(), nss.ns(), fromjson("{query: {}, collation: 1}"))
+ PlanCacheCommand::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {}, collation: 1}"))
.getStatus());
// Bad query (invalid sort order)
ASSERT_NOT_OK(
- PlanCacheCommand::canonicalize(txn.get(), nss.ns(), fromjson("{query: {}, sort: {a: 0}}"))
+ PlanCacheCommand::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {}, sort: {a: 0}}"))
.getStatus());
// Valid parameters
auto statusWithCQ =
- PlanCacheCommand::canonicalize(txn.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}}"));
+ PlanCacheCommand::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> query = std::move(statusWithCQ.getValue());
// Equivalent query should generate same key.
statusWithCQ =
- PlanCacheCommand::canonicalize(txn.get(), nss.ns(), fromjson("{query: {b: 1, a: 1}}"));
+ PlanCacheCommand::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {b: 1, a: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> equivQuery = std::move(statusWithCQ.getValue());
ASSERT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*equivQuery));
// Sort query should generate different key from unsorted query.
statusWithCQ = PlanCacheCommand::canonicalize(
- txn.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {a: 1, b: 1}}"));
+ opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {a: 1, b: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> sortQuery1 = std::move(statusWithCQ.getValue());
ASSERT_NOT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*sortQuery1));
// Confirm sort arguments are properly delimited (SERVER-17158)
statusWithCQ = PlanCacheCommand::canonicalize(
- txn.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {aab: 1}}"));
+ opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {aab: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> sortQuery2 = std::move(statusWithCQ.getValue());
ASSERT_NOT_EQUALS(planCache.computeKey(*sortQuery1), planCache.computeKey(*sortQuery2));
// Changing order and/or value of predicates should not change key
statusWithCQ = PlanCacheCommand::canonicalize(
- txn.get(), nss.ns(), fromjson("{query: {b: 3, a: 3}, sort: {a: 1, b: 1}}"));
+ opCtx.get(), nss.ns(), fromjson("{query: {b: 3, a: 3}, sort: {a: 1, b: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> sortQuery3 = std::move(statusWithCQ.getValue());
ASSERT_EQUALS(planCache.computeKey(*sortQuery1), planCache.computeKey(*sortQuery3));
// Projected query should generate different key from unprojected query.
statusWithCQ = PlanCacheCommand::canonicalize(
- txn.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, projection: {_id: 0, a: 1}}"));
+ opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, projection: {_id: 0, a: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> projectionQuery = std::move(statusWithCQ.getValue());
ASSERT_NOT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*projectionQuery));
@@ -275,47 +276,47 @@ TEST(PlanCacheCommandsTest, Canonicalize) {
TEST(PlanCacheCommandsTest, planCacheClearInvalidParameter) {
PlanCache planCache;
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
// Query field type must be BSON object.
- ASSERT_NOT_OK(PlanCacheClear::clear(&txn, &planCache, nss.ns(), fromjson("{query: 12345}")));
+ ASSERT_NOT_OK(PlanCacheClear::clear(&opCtx, &planCache, nss.ns(), fromjson("{query: 12345}")));
ASSERT_NOT_OK(
- PlanCacheClear::clear(&txn, &planCache, nss.ns(), fromjson("{query: /keyisnotregex/}")));
+ PlanCacheClear::clear(&opCtx, &planCache, nss.ns(), fromjson("{query: /keyisnotregex/}")));
// Query must pass canonicalization.
ASSERT_NOT_OK(PlanCacheClear::clear(
- &txn, &planCache, nss.ns(), fromjson("{query: {a: {$no_such_op: 1}}}")));
+ &opCtx, &planCache, nss.ns(), fromjson("{query: {a: {$no_such_op: 1}}}")));
// Sort present without query is an error.
- ASSERT_NOT_OK(PlanCacheClear::clear(&txn, &planCache, nss.ns(), fromjson("{sort: {a: 1}}")));
+ ASSERT_NOT_OK(PlanCacheClear::clear(&opCtx, &planCache, nss.ns(), fromjson("{sort: {a: 1}}")));
// Projection present without query is an error.
ASSERT_NOT_OK(PlanCacheClear::clear(
- &txn, &planCache, nss.ns(), fromjson("{projection: {_id: 0, a: 1}}")));
+ &opCtx, &planCache, nss.ns(), fromjson("{projection: {_id: 0, a: 1}}")));
// Collation present without query is an error.
ASSERT_NOT_OK(PlanCacheClear::clear(
- &txn, &planCache, nss.ns(), fromjson("{collation: {locale: 'en_US'}}")));
+ &opCtx, &planCache, nss.ns(), fromjson("{collation: {locale: 'en_US'}}")));
}
TEST(PlanCacheCommandsTest, planCacheClearUnknownKey) {
PlanCache planCache;
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
- ASSERT_OK(PlanCacheClear::clear(&txn, &planCache, nss.ns(), fromjson("{query: {a: 1}}")));
+ ASSERT_OK(PlanCacheClear::clear(&opCtx, &planCache, nss.ns(), fromjson("{query: {a: 1}}")));
}
TEST(PlanCacheCommandsTest, planCacheClearOneKey) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Create 2 canonical queries.
auto qrA = stdx::make_unique<QueryRequest>(nss);
qrA->setFilter(fromjson("{a: 1}"));
auto statusWithCQA = CanonicalQuery::canonicalize(
- txn.get(), std::move(qrA), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qrA), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQA.getStatus());
auto qrB = stdx::make_unique<QueryRequest>(nss);
qrB->setFilter(fromjson("{b: 1}"));
unique_ptr<CanonicalQuery> cqA = std::move(statusWithCQA.getValue());
auto statusWithCQB = CanonicalQuery::canonicalize(
- txn.get(), std::move(qrB), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qrB), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQB.getStatus());
unique_ptr<CanonicalQuery> cqB = std::move(statusWithCQB.getValue());
@@ -350,7 +351,7 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKey) {
BSONObjBuilder bob;
ASSERT_OK(PlanCacheClear::clear(
- txn.get(), &planCache, nss.ns(), BSON("query" << cqB->getQueryObj())));
+ opCtx.get(), &planCache, nss.ns(), BSON("query" << cqB->getQueryObj())));
vector<BSONObj> shapesAfter = getShapes(planCache);
ASSERT_EQUALS(shapesAfter.size(), 1U);
ASSERT_BSONOBJ_EQ(shapesAfter[0], shapeA);
@@ -358,20 +359,20 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKey) {
TEST(PlanCacheCommandsTest, planCacheClearOneKeyCollation) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Create 2 canonical queries, one with collation.
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson("{a: 'foo'}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
auto qrCollation = stdx::make_unique<QueryRequest>(nss);
qrCollation->setFilter(fromjson("{a: 'foo'}"));
qrCollation->setCollation(fromjson("{locale: 'mock_reverse_string'}"));
auto statusWithCQCollation = CanonicalQuery::canonicalize(
- txn.get(), std::move(qrCollation), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qrCollation), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQCollation.getStatus());
unique_ptr<CanonicalQuery> cqCollation = std::move(statusWithCQCollation.getValue());
@@ -412,7 +413,7 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKeyCollation) {
// Drop query with collation from cache. Make other query is still in cache afterwards.
BSONObjBuilder bob;
- ASSERT_OK(PlanCacheClear::clear(txn.get(), &planCache, nss.ns(), shapeWithCollation));
+ ASSERT_OK(PlanCacheClear::clear(opCtx.get(), &planCache, nss.ns(), shapeWithCollation));
vector<BSONObj> shapesAfter = getShapes(planCache);
ASSERT_EQUALS(shapesAfter.size(), 1U);
ASSERT_BSONOBJ_EQ(shapesAfter[0], shape);
@@ -464,7 +465,7 @@ vector<BSONObj> getPlans(const PlanCache& planCache,
const BSONObj& projection,
const BSONObj& collation) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
BSONObjBuilder bob;
BSONObjBuilder cmdObjBuilder;
@@ -475,7 +476,7 @@ vector<BSONObj> getPlans(const PlanCache& planCache,
cmdObjBuilder.append("collation", collation);
}
BSONObj cmdObj = cmdObjBuilder.obj();
- ASSERT_OK(PlanCacheListPlans::list(txn.get(), planCache, nss.ns(), cmdObj, &bob));
+ ASSERT_OK(PlanCacheListPlans::list(opCtx.get(), planCache, nss.ns(), cmdObj, &bob));
BSONObj resultObj = bob.obj();
BSONElement plansElt = resultObj.getField("plans");
ASSERT_EQUALS(plansElt.type(), mongo::Array);
@@ -489,36 +490,36 @@ vector<BSONObj> getPlans(const PlanCache& planCache,
TEST(PlanCacheCommandsTest, planCacheListPlansInvalidParameter) {
PlanCache planCache;
BSONObjBuilder ignored;
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
// Missing query field is not ok.
- ASSERT_NOT_OK(PlanCacheListPlans::list(&txn, planCache, nss.ns(), BSONObj(), &ignored));
+ ASSERT_NOT_OK(PlanCacheListPlans::list(&opCtx, planCache, nss.ns(), BSONObj(), &ignored));
// Query field type must be BSON object.
- ASSERT_NOT_OK(
- PlanCacheListPlans::list(&txn, planCache, nss.ns(), fromjson("{query: 12345}"), &ignored));
ASSERT_NOT_OK(PlanCacheListPlans::list(
- &txn, planCache, nss.ns(), fromjson("{query: /keyisnotregex/}"), &ignored));
+ &opCtx, planCache, nss.ns(), fromjson("{query: 12345}"), &ignored));
+ ASSERT_NOT_OK(PlanCacheListPlans::list(
+ &opCtx, planCache, nss.ns(), fromjson("{query: /keyisnotregex/}"), &ignored));
}
TEST(PlanCacheCommandsTest, planCacheListPlansUnknownKey) {
// Leave the plan cache empty.
PlanCache planCache;
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
BSONObjBuilder ignored;
- ASSERT_OK(
- PlanCacheListPlans::list(&txn, planCache, nss.ns(), fromjson("{query: {a: 1}}"), &ignored));
+ ASSERT_OK(PlanCacheListPlans::list(
+ &opCtx, planCache, nss.ns(), fromjson("{query: {a: 1}}"), &ignored));
}
TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionTrue) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Create a canonical query
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson("{a: 1}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -540,13 +541,13 @@ TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionTrue) {
TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionFalse) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Create a canonical query
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson("{a: 1}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -571,20 +572,20 @@ TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionFalse) {
TEST(PlanCacheCommandsTest, planCacheListPlansCollation) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Create 2 canonical queries, one with collation.
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson("{a: 'foo'}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
auto qrCollation = stdx::make_unique<QueryRequest>(nss);
qrCollation->setFilter(fromjson("{a: 'foo'}"));
qrCollation->setCollation(fromjson("{locale: 'mock_reverse_string'}"));
auto statusWithCQCollation = CanonicalQuery::canonicalize(
- txn.get(), std::move(qrCollation), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qrCollation), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQCollation.getStatus());
unique_ptr<CanonicalQuery> cqCollation = std::move(statusWithCQCollation.getValue());
diff --git a/src/mongo/db/commands/rename_collection_cmd.cpp b/src/mongo/db/commands/rename_collection_cmd.cpp
index 99f5617cf94..6aaa3b5f744 100644
--- a/src/mongo/db/commands/rename_collection_cmd.cpp
+++ b/src/mongo/db/commands/rename_collection_cmd.cpp
@@ -77,15 +77,15 @@ public:
help << " example: { renameCollection: foo.a, to: bar.b }";
}
- static void dropCollection(OperationContext* txn, Database* db, StringData collName) {
- WriteUnitOfWork wunit(txn);
- if (db->dropCollection(txn, collName).isOK()) {
+ static void dropCollection(OperationContext* opCtx, Database* db, StringData collName) {
+ WriteUnitOfWork wunit(opCtx);
+ if (db->dropCollection(opCtx, collName).isOK()) {
// ignoring failure case
wunit.commit();
}
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -146,7 +146,7 @@ public:
}
return appendCommandStatus(result,
- renameCollection(txn,
+ renameCollection(opCtx,
source,
target,
cmdObj["dropTarget"].trueValue(),
diff --git a/src/mongo/db/commands/repair_cursor.cpp b/src/mongo/db/commands/repair_cursor.cpp
index 4e34e0bbb0f..b5d7c2fde6f 100644
--- a/src/mongo/db/commands/repair_cursor.cpp
+++ b/src/mongo/db/commands/repair_cursor.cpp
@@ -67,7 +67,7 @@ public:
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -75,7 +75,7 @@ public:
BSONObjBuilder& result) {
NamespaceString ns(parseNs(dbname, cmdObj));
- AutoGetCollectionForRead ctx(txn, ns);
+ AutoGetCollectionForRead ctx(opCtx, ns);
Collection* collection = ctx.getCollection();
if (!collection) {
@@ -83,7 +83,7 @@ public:
result, Status(ErrorCodes::NamespaceNotFound, "ns does not exist: " + ns.ns()));
}
- auto cursor = collection->getRecordStore()->getCursorForRepair(txn);
+ auto cursor = collection->getRecordStore()->getCursorForRepair(opCtx);
if (!cursor) {
return appendCommandStatus(
result, Status(ErrorCodes::CommandNotSupported, "repair iterator not supported"));
@@ -91,11 +91,11 @@ public:
std::unique_ptr<WorkingSet> ws(new WorkingSet());
std::unique_ptr<MultiIteratorStage> stage(
- new MultiIteratorStage(txn, ws.get(), collection));
+ new MultiIteratorStage(opCtx, ws.get(), collection));
stage->addIterator(std::move(cursor));
auto statusWithPlanExecutor = PlanExecutor::make(
- txn, std::move(ws), std::move(stage), collection, PlanExecutor::YIELD_AUTO);
+ opCtx, std::move(ws), std::move(stage), collection, PlanExecutor::YIELD_AUTO);
invariant(statusWithPlanExecutor.isOK());
std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
@@ -109,7 +109,7 @@ public:
auto pinnedCursor = collection->getCursorManager()->registerCursor(
{exec.release(),
ns.ns(),
- txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot()});
+ opCtx->recoveryUnit()->isReadingFromMajorityCommittedSnapshot()});
appendCursorResponseObject(
pinnedCursor.getCursor()->cursorid(), ns.ns(), BSONArray(), &result);
diff --git a/src/mongo/db/commands/server_status.cpp b/src/mongo/db/commands/server_status.cpp
index 67716cf7a14..fd429ed21b1 100644
--- a/src/mongo/db/commands/server_status.cpp
+++ b/src/mongo/db/commands/server_status.cpp
@@ -85,7 +85,7 @@ public:
actions.addAction(ActionType::serverStatus);
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -93,7 +93,7 @@ public:
BSONObjBuilder& result) {
_runCalled = true;
- const auto service = txn->getServiceContext();
+ const auto service = opCtx->getServiceContext();
const auto clock = service->getFastClockSource();
const auto runStart = clock->now();
BSONObjBuilder timeBuilder(256);
@@ -135,7 +135,7 @@ public:
continue;
}
- section->appendSection(txn, elem, &result);
+ section->appendSection(opCtx, elem, &result);
timeBuilder.appendNumber(
static_cast<string>(str::stream() << "after " << section->getSectionName()),
durationCount<Milliseconds>(clock->now() - runStart));
@@ -201,7 +201,7 @@ OpCounterServerStatusSection::OpCounterServerStatusSection(const string& section
OpCounters* counters)
: ServerStatusSection(sectionName), _counters(counters) {}
-BSONObj OpCounterServerStatusSection::generateSection(OperationContext* txn,
+BSONObj OpCounterServerStatusSection::generateSection(OperationContext* opCtx,
const BSONElement& configElement) const {
return _counters->getObj();
}
@@ -220,9 +220,9 @@ public:
return true;
}
- BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObj generateSection(OperationContext* opCtx, const BSONElement& configElement) const {
BSONObjBuilder bb;
- auto stats = txn->getServiceContext()->getTransportLayer()->sessionStats();
+ auto stats = opCtx->getServiceContext()->getTransportLayer()->sessionStats();
bb.append("current", static_cast<int>(stats.numOpenSessions));
bb.append("available", static_cast<int>(stats.numAvailableSessions));
bb.append("totalCreated", static_cast<int>(stats.numCreatedSessions));
@@ -238,7 +238,7 @@ public:
return true;
}
- BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObj generateSection(OperationContext* opCtx, const BSONElement& configElement) const {
BSONObjBuilder bb;
bb.append("note", "fields vary by platform");
@@ -258,7 +258,7 @@ public:
return true;
}
- BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObj generateSection(OperationContext* opCtx, const BSONElement& configElement) const {
BSONObjBuilder asserts;
asserts.append("regular", assertionCount.regular);
asserts.append("warning", assertionCount.warning);
@@ -278,7 +278,7 @@ public:
return true;
}
- BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObj generateSection(OperationContext* opCtx, const BSONElement& configElement) const {
BSONObjBuilder b;
networkCounter.append(b);
appendMessageCompressionStats(&b);
@@ -295,7 +295,7 @@ public:
return true;
}
- BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObj generateSection(OperationContext* opCtx, const BSONElement& configElement) const {
BSONObj result;
if (getSSLManager()) {
result = getSSLManager()->getSSLConfiguration().getServerStatusBSON();
@@ -334,7 +334,7 @@ public:
return false;
}
- void appendSection(OperationContext* txn,
+ void appendSection(OperationContext* opCtx,
const BSONElement& configElement,
BSONObjBuilder* out) const override {
out->append(
diff --git a/src/mongo/db/commands/server_status.h b/src/mongo/db/commands/server_status.h
index b017688acf2..506c1428629 100644
--- a/src/mongo/db/commands/server_status.h
+++ b/src/mongo/db/commands/server_status.h
@@ -80,7 +80,8 @@ public:
* @param configElement the element from the actual command related to this section
* so if the section is 'foo', this is cmdObj['foo']
*/
- virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ virtual BSONObj generateSection(OperationContext* opCtx,
+ const BSONElement& configElement) const {
return BSONObj{};
};
@@ -94,10 +95,10 @@ public:
* If you are doing something a bit more complicated, you can implement this and have
* full control over what gets included in the command result.
*/
- virtual void appendSection(OperationContext* txn,
+ virtual void appendSection(OperationContext* opCtx,
const BSONElement& configElement,
BSONObjBuilder* result) const {
- const auto ret = generateSection(txn, configElement);
+ const auto ret = generateSection(opCtx, configElement);
if (ret.isEmpty())
return;
result->append(getSectionName(), ret);
@@ -114,7 +115,8 @@ public:
return true;
}
- virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const;
+ virtual BSONObj generateSection(OperationContext* opCtx,
+ const BSONElement& configElement) const;
private:
const OpCounters* _counters;
diff --git a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
index 4d6c7f9867d..6e173143efc 100644
--- a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
+++ b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
@@ -81,7 +81,7 @@ public:
return Status::OK();
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -90,7 +90,7 @@ public:
const auto version = uassertStatusOK(
FeatureCompatibilityVersionCommandParser::extractVersionFromCommand(getName(), cmdObj));
- FeatureCompatibilityVersion::set(txn, version);
+ FeatureCompatibilityVersion::set(opCtx, version);
return true;
}
diff --git a/src/mongo/db/commands/snapshot_management.cpp b/src/mongo/db/commands/snapshot_management.cpp
index 5c215c4c6f4..8ab963eb71e 100644
--- a/src/mongo/db/commands/snapshot_management.cpp
+++ b/src/mongo/db/commands/snapshot_management.cpp
@@ -63,7 +63,7 @@ public:
h << "Creates a new named snapshot";
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int,
@@ -75,14 +75,15 @@ public:
return appendCommandStatus(result, {ErrorCodes::CommandNotSupported, ""});
}
- ScopedTransaction st(txn, MODE_IX);
- Lock::GlobalLock lk(txn->lockState(), MODE_IX, UINT_MAX);
+ ScopedTransaction st(opCtx, MODE_IX);
+ Lock::GlobalLock lk(opCtx->lockState(), MODE_IX, UINT_MAX);
- auto status = snapshotManager->prepareForCreateSnapshot(txn);
+ auto status = snapshotManager->prepareForCreateSnapshot(opCtx);
if (status.isOK()) {
- const auto name = repl::ReplicationCoordinator::get(txn)->reserveSnapshotName(nullptr);
+ const auto name =
+ repl::ReplicationCoordinator::get(opCtx)->reserveSnapshotName(nullptr);
result.append("name", static_cast<long long>(name.asU64()));
- status = snapshotManager->createSnapshot(txn, name);
+ status = snapshotManager->createSnapshot(opCtx, name);
}
return appendCommandStatus(result, status);
}
@@ -113,7 +114,7 @@ public:
h << "Sets the snapshot for {readConcern: {level: 'majority'}}";
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int,
@@ -125,8 +126,8 @@ public:
return appendCommandStatus(result, {ErrorCodes::CommandNotSupported, ""});
}
- ScopedTransaction st(txn, MODE_IX);
- Lock::GlobalLock lk(txn->lockState(), MODE_IX, UINT_MAX);
+ ScopedTransaction st(opCtx, MODE_IX);
+ Lock::GlobalLock lk(opCtx->lockState(), MODE_IX, UINT_MAX);
auto name = SnapshotName(cmdObj.firstElement().Long());
snapshotManager->setCommittedSnapshot(name);
return true;
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 560e6496688..34bc757d554 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -75,7 +75,7 @@ public:
virtual void help(stringstream& help) const {
help << "internal. for testing only.";
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -85,23 +85,23 @@ public:
log() << "test only command godinsert invoked coll:" << nss.coll();
BSONObj obj = cmdObj["obj"].embeddedObjectUserCheck();
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock lk(txn->lockState(), dbname, MODE_X);
- OldClientContext ctx(txn, nss.ns());
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock lk(opCtx->lockState(), dbname, MODE_X);
+ OldClientContext ctx(opCtx, nss.ns());
Database* db = ctx.db();
- WriteUnitOfWork wunit(txn);
- UnreplicatedWritesBlock unreplicatedWritesBlock(txn);
+ WriteUnitOfWork wunit(opCtx);
+ UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx);
Collection* collection = db->getCollection(nss);
if (!collection) {
- collection = db->createCollection(txn, nss.ns());
+ collection = db->createCollection(opCtx, nss.ns());
if (!collection) {
errmsg = "could not create collection";
return false;
}
}
OpDebug* const nullOpDebug = nullptr;
- Status status = collection->insertDocument(txn, obj, nullOpDebug, false);
+ Status status = collection->insertDocument(opCtx, obj, nullOpDebug, false);
if (status.isOK()) {
wunit.commit();
}
@@ -140,20 +140,20 @@ public:
const BSONObj& cmdObj,
std::vector<Privilege>* out) {}
- void _sleepInReadLock(mongo::OperationContext* txn, long long millis) {
- ScopedTransaction transaction(txn, MODE_S);
- Lock::GlobalRead lk(txn->lockState());
+ void _sleepInReadLock(mongo::OperationContext* opCtx, long long millis) {
+ ScopedTransaction transaction(opCtx, MODE_S);
+ Lock::GlobalRead lk(opCtx->lockState());
sleepmillis(millis);
}
- void _sleepInWriteLock(mongo::OperationContext* txn, long long millis) {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
+ void _sleepInWriteLock(mongo::OperationContext* opCtx, long long millis) {
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
sleepmillis(millis);
}
CmdSleep() : Command("sleep") {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& ns,
BSONObj& cmdObj,
int,
@@ -178,9 +178,9 @@ public:
if (!cmdObj["lock"]) {
// Legacy implementation
if (cmdObj.getBoolField("w")) {
- _sleepInWriteLock(txn, millis);
+ _sleepInWriteLock(opCtx, millis);
} else {
- _sleepInReadLock(txn, millis);
+ _sleepInReadLock(opCtx, millis);
}
} else {
uassert(34346, "Only one of 'w' and 'lock' may be set.", !cmdObj["w"]);
@@ -189,15 +189,15 @@ public:
if (lock == "none") {
sleepmillis(millis);
} else if (lock == "w") {
- _sleepInWriteLock(txn, millis);
+ _sleepInWriteLock(opCtx, millis);
} else {
uassert(34347, "'lock' must be one of 'r', 'w', 'none'.", lock == "r");
- _sleepInReadLock(txn, millis);
+ _sleepInReadLock(opCtx, millis);
}
}
// Interrupt point for testing (e.g. maxTimeMS).
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
return true;
}
@@ -217,7 +217,7 @@ public:
virtual void addRequiredPrivileges(const std::string& dbname,
const BSONObj& cmdObj,
std::vector<Privilege>* out) {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -232,11 +232,11 @@ public:
{ErrorCodes::BadValue, "n must be a positive integer"});
}
- OldClientWriteContext ctx(txn, fullNs.ns());
+ OldClientWriteContext ctx(opCtx, fullNs.ns());
Collection* collection = ctx.getCollection();
if (!collection) {
- if (ctx.db()->getViewCatalog()->lookup(txn, fullNs.ns())) {
+ if (ctx.db()->getViewCatalog()->lookup(opCtx, fullNs.ns())) {
return appendCommandStatus(
result,
{ErrorCodes::CommandNotSupportedOnView,
@@ -259,7 +259,7 @@ public:
// We will remove 'n' documents, so start truncating from the (n + 1)th document to the
// end.
std::unique_ptr<PlanExecutor> exec(
- InternalPlanner::collectionScan(txn,
+ InternalPlanner::collectionScan(opCtx,
fullNs.ns(),
collection,
PlanExecutor::YIELD_MANUAL,
@@ -277,7 +277,7 @@ public:
}
}
- collection->cappedTruncateAfter(txn, end, inc);
+ collection->cappedTruncateAfter(opCtx, end, inc);
return true;
}
@@ -298,7 +298,7 @@ public:
const BSONObj& cmdObj,
std::vector<Privilege>* out) {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -306,7 +306,7 @@ public:
BSONObjBuilder& result) {
const NamespaceString nss = parseNsCollectionRequired(dbname, cmdObj);
- return appendCommandStatus(result, emptyCapped(txn, nss));
+ return appendCommandStatus(result, emptyCapped(opCtx, nss));
}
};
diff --git a/src/mongo/db/commands/top_command.cpp b/src/mongo/db/commands/top_command.cpp
index 6f236de90da..80ef9171efd 100644
--- a/src/mongo/db/commands/top_command.cpp
+++ b/src/mongo/db/commands/top_command.cpp
@@ -65,7 +65,7 @@ public:
actions.addAction(ActionType::top);
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& db,
BSONObj& cmdObj,
int options,
@@ -74,7 +74,7 @@ public:
{
BSONObjBuilder b(result.subobjStart("totals"));
b.append("note", "all times in microseconds");
- Top::get(txn->getClient()->getServiceContext()).append(b);
+ Top::get(opCtx->getClient()->getServiceContext()).append(b);
b.done();
}
return true;
diff --git a/src/mongo/db/commands/touch.cpp b/src/mongo/db/commands/touch.cpp
index a1fe53e84d8..1f28da9e3fc 100644
--- a/src/mongo/db/commands/touch.cpp
+++ b/src/mongo/db/commands/touch.cpp
@@ -82,7 +82,7 @@ public:
}
TouchCmd() : Command("touch") {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -102,7 +102,7 @@ public:
return false;
}
- AutoGetCollectionForRead context(txn, nss);
+ AutoGetCollectionForRead context(opCtx, nss);
Collection* collection = context.getCollection();
if (!collection) {
@@ -111,7 +111,7 @@ public:
}
return appendCommandStatus(result,
- collection->touch(txn, touch_data, touch_indexes, &result));
+ collection->touch(opCtx, touch_data, touch_indexes, &result));
}
};
static TouchCmd touchCmd;
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index 7d14e2f1416..b6a55727c80 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -135,13 +135,13 @@ Status privilegeVectorToBSONArray(const PrivilegeVector& privileges, BSONArray*
/**
* Used to get all current roles of the user identified by 'userName'.
*/
-Status getCurrentUserRoles(OperationContext* txn,
+Status getCurrentUserRoles(OperationContext* opCtx,
AuthorizationManager* authzManager,
const UserName& userName,
unordered_set<RoleName>* roles) {
User* user;
authzManager->invalidateUserByName(userName); // Need to make sure cache entry is up to date
- Status status = authzManager->acquireUser(txn, userName, &user);
+ Status status = authzManager->acquireUser(opCtx, userName, &user);
if (!status.isOK()) {
return status;
}
@@ -159,7 +159,7 @@ Status getCurrentUserRoles(OperationContext* txn,
* same database as the role it is being added to (or that the role being added to is from the
* "admin" database.
*/
-Status checkOkayToGrantRolesToRole(OperationContext* txn,
+Status checkOkayToGrantRolesToRole(OperationContext* opCtx,
const RoleName& role,
const std::vector<RoleName> rolesToAdd,
AuthorizationManager* authzManager) {
@@ -180,8 +180,8 @@ Status checkOkayToGrantRolesToRole(OperationContext* txn,
}
BSONObj roleToAddDoc;
- Status status =
- authzManager->getRoleDescription(txn, roleToAdd, PrivilegeFormat::kOmit, &roleToAddDoc);
+ Status status = authzManager->getRoleDescription(
+ opCtx, roleToAdd, PrivilegeFormat::kOmit, &roleToAddDoc);
if (status == ErrorCodes::RoleNotFound) {
return Status(ErrorCodes::RoleNotFound,
"Cannot grant nonexistent role " + roleToAdd.toString());
@@ -242,13 +242,13 @@ void appendBSONObjToBSONArrayBuilder(BSONArrayBuilder* array, const BSONObj& obj
* Should only be called on collections with authorization documents in them
* (ie admin.system.users and admin.system.roles).
*/
-Status queryAuthzDocument(OperationContext* txn,
+Status queryAuthzDocument(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
const BSONObj& projection,
const stdx::function<void(const BSONObj&)>& resultProcessor) {
try {
- DBDirectClient client(txn);
+ DBDirectClient client(opCtx);
client.query(resultProcessor, collectionName.ns(), query, &projection);
return Status::OK();
} catch (const DBException& e) {
@@ -263,11 +263,11 @@ Status queryAuthzDocument(OperationContext* txn,
* Should only be called on collections with authorization documents in them
* (ie admin.system.users and admin.system.roles).
*/
-Status insertAuthzDocument(OperationContext* txn,
+Status insertAuthzDocument(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& document) {
try {
- DBDirectClient client(txn);
+ DBDirectClient client(opCtx);
BatchedInsertRequest req;
req.setNS(collectionName);
@@ -293,7 +293,7 @@ Status insertAuthzDocument(OperationContext* txn,
* Should only be called on collections with authorization documents in them
* (ie admin.system.users and admin.system.roles).
*/
-Status updateAuthzDocuments(OperationContext* txn,
+Status updateAuthzDocuments(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
const BSONObj& updatePattern,
@@ -301,7 +301,7 @@ Status updateAuthzDocuments(OperationContext* txn,
bool multi,
long long* nMatched) {
try {
- DBDirectClient client(txn);
+ DBDirectClient client(opCtx);
auto doc = stdx::make_unique<BatchedUpdateDocument>();
doc->setQuery(query);
@@ -342,14 +342,14 @@ Status updateAuthzDocuments(OperationContext* txn,
* Should only be called on collections with authorization documents in them
* (ie admin.system.users and admin.system.roles).
*/
-Status updateOneAuthzDocument(OperationContext* txn,
+Status updateOneAuthzDocument(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
const BSONObj& updatePattern,
bool upsert) {
long long nMatched;
Status status =
- updateAuthzDocuments(txn, collectionName, query, updatePattern, upsert, false, &nMatched);
+ updateAuthzDocuments(opCtx, collectionName, query, updatePattern, upsert, false, &nMatched);
if (!status.isOK()) {
return status;
}
@@ -366,12 +366,12 @@ Status updateOneAuthzDocument(OperationContext* txn,
* Should only be called on collections with authorization documents in them
* (ie admin.system.users and admin.system.roles).
*/
-Status removeAuthzDocuments(OperationContext* txn,
+Status removeAuthzDocuments(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
long long* numRemoved) {
try {
- DBDirectClient client(txn);
+ DBDirectClient client(opCtx);
auto doc = stdx::make_unique<BatchedDeleteDocument>();
doc->setQuery(query);
@@ -401,9 +401,9 @@ Status removeAuthzDocuments(OperationContext* txn,
/**
* Creates the given role object in the given database.
*/
-Status insertRoleDocument(OperationContext* txn, const BSONObj& roleObj) {
+Status insertRoleDocument(OperationContext* opCtx, const BSONObj& roleObj) {
Status status =
- insertAuthzDocument(txn, AuthorizationManager::rolesCollectionNamespace, roleObj);
+ insertAuthzDocument(opCtx, AuthorizationManager::rolesCollectionNamespace, roleObj);
if (status.isOK()) {
return status;
}
@@ -422,8 +422,8 @@ Status insertRoleDocument(OperationContext* txn, const BSONObj& roleObj) {
/**
* Updates the given role object with the given update modifier.
*/
-Status updateRoleDocument(OperationContext* txn, const RoleName& role, const BSONObj& updateObj) {
- Status status = updateOneAuthzDocument(txn,
+Status updateRoleDocument(OperationContext* opCtx, const RoleName& role, const BSONObj& updateObj) {
+ Status status = updateOneAuthzDocument(opCtx,
AuthorizationManager::rolesCollectionNamespace,
BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
<< role.getRole()
@@ -448,9 +448,9 @@ Status updateRoleDocument(OperationContext* txn, const RoleName& role, const BSO
* Removes roles matching the given query.
* Writes into *numRemoved the number of role documents that were modified.
*/
-Status removeRoleDocuments(OperationContext* txn, const BSONObj& query, long long* numRemoved) {
+Status removeRoleDocuments(OperationContext* opCtx, const BSONObj& query, long long* numRemoved) {
Status status = removeAuthzDocuments(
- txn, AuthorizationManager::rolesCollectionNamespace, query, numRemoved);
+ opCtx, AuthorizationManager::rolesCollectionNamespace, query, numRemoved);
if (status.code() == ErrorCodes::UnknownError) {
return Status(ErrorCodes::RoleModificationFailed, status.reason());
}
@@ -460,9 +460,9 @@ Status removeRoleDocuments(OperationContext* txn, const BSONObj& query, long lon
/**
* Creates the given user object in the given database.
*/
-Status insertPrivilegeDocument(OperationContext* txn, const BSONObj& userObj) {
+Status insertPrivilegeDocument(OperationContext* opCtx, const BSONObj& userObj) {
Status status =
- insertAuthzDocument(txn, AuthorizationManager::usersCollectionNamespace, userObj);
+ insertAuthzDocument(opCtx, AuthorizationManager::usersCollectionNamespace, userObj);
if (status.isOK()) {
return status;
}
@@ -481,10 +481,10 @@ Status insertPrivilegeDocument(OperationContext* txn, const BSONObj& userObj) {
/**
* Updates the given user object with the given update modifier.
*/
-Status updatePrivilegeDocument(OperationContext* txn,
+Status updatePrivilegeDocument(OperationContext* opCtx,
const UserName& user,
const BSONObj& updateObj) {
- Status status = updateOneAuthzDocument(txn,
+ Status status = updateOneAuthzDocument(opCtx,
AuthorizationManager::usersCollectionNamespace,
BSON(AuthorizationManager::USER_NAME_FIELD_NAME
<< user.getUser()
@@ -509,11 +509,11 @@ Status updatePrivilegeDocument(OperationContext* txn,
* Removes users for the given database matching the given query.
* Writes into *numRemoved the number of user documents that were modified.
*/
-Status removePrivilegeDocuments(OperationContext* txn,
+Status removePrivilegeDocuments(OperationContext* opCtx,
const BSONObj& query,
long long* numRemoved) {
Status status = removeAuthzDocuments(
- txn, AuthorizationManager::usersCollectionNamespace, query, numRemoved);
+ opCtx, AuthorizationManager::usersCollectionNamespace, query, numRemoved);
if (status.code() == ErrorCodes::UnknownError) {
return Status(ErrorCodes::UserModificationFailed, status.reason());
}
@@ -524,11 +524,11 @@ Status removePrivilegeDocuments(OperationContext* txn,
* Updates the auth schema version document to reflect the current state of the system.
* 'foundSchemaVersion' is the authSchemaVersion to update with.
*/
-Status writeAuthSchemaVersionIfNeeded(OperationContext* txn,
+Status writeAuthSchemaVersionIfNeeded(OperationContext* opCtx,
AuthorizationManager* authzManager,
int foundSchemaVersion) {
Status status = updateOneAuthzDocument(
- txn,
+ opCtx,
AuthorizationManager::versionCollectionNamespace,
AuthorizationManager::versionDocumentQuery,
BSON("$set" << BSON(AuthorizationManager::schemaVersionFieldName << foundSchemaVersion)),
@@ -546,9 +546,10 @@ Status writeAuthSchemaVersionIfNeeded(OperationContext* txn,
* for the MongoDB 2.6 and 3.0 MongoDB-CR/SCRAM mixed auth mode.
* Returns an error otherwise.
*/
-Status requireAuthSchemaVersion26Final(OperationContext* txn, AuthorizationManager* authzManager) {
+Status requireAuthSchemaVersion26Final(OperationContext* opCtx,
+ AuthorizationManager* authzManager) {
int foundSchemaVersion;
- Status status = authzManager->getAuthorizationVersion(txn, &foundSchemaVersion);
+ Status status = authzManager->getAuthorizationVersion(opCtx, &foundSchemaVersion);
if (!status.isOK()) {
return status;
}
@@ -562,7 +563,7 @@ Status requireAuthSchemaVersion26Final(OperationContext* txn, AuthorizationManag
<< " but found "
<< foundSchemaVersion);
}
- return writeAuthSchemaVersionIfNeeded(txn, authzManager, foundSchemaVersion);
+ return writeAuthSchemaVersionIfNeeded(opCtx, authzManager, foundSchemaVersion);
}
/**
@@ -570,10 +571,10 @@ Status requireAuthSchemaVersion26Final(OperationContext* txn, AuthorizationManag
* for MongoDB 2.6 during the upgrade process.
* Returns an error otherwise.
*/
-Status requireAuthSchemaVersion26UpgradeOrFinal(OperationContext* txn,
+Status requireAuthSchemaVersion26UpgradeOrFinal(OperationContext* opCtx,
AuthorizationManager* authzManager) {
int foundSchemaVersion;
- Status status = authzManager->getAuthorizationVersion(txn, &foundSchemaVersion);
+ Status status = authzManager->getAuthorizationVersion(opCtx, &foundSchemaVersion);
if (!status.isOK()) {
return status;
}
@@ -614,7 +615,7 @@ public:
return auth::checkAuthForCreateUserCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -670,10 +671,10 @@ public:
userObjBuilder.append(AuthorizationManager::USER_NAME_FIELD_NAME, args.userName.getUser());
userObjBuilder.append(AuthorizationManager::USER_DB_FIELD_NAME, args.userName.getDB());
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
int authzVersion;
- status = authzManager->getAuthorizationVersion(txn, &authzVersion);
+ status = authzManager->getAuthorizationVersion(opCtx, &authzVersion);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -708,7 +709,7 @@ public:
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -717,7 +718,7 @@ public:
for (size_t i = 0; i < args.roles.size(); ++i) {
BSONObj ignored;
status = authzManager->getRoleDescription(
- txn, args.roles[i], PrivilegeFormat::kOmit, &ignored);
+ opCtx, args.roles[i], PrivilegeFormat::kOmit, &ignored);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -728,7 +729,7 @@ public:
args.hasHashedPassword,
args.hasCustomData ? &args.customData : NULL,
args.roles);
- status = insertPrivilegeDocument(txn, userObj);
+ status = insertPrivilegeDocument(opCtx, userObj);
return appendCommandStatus(result, status);
}
@@ -760,7 +761,7 @@ public:
return auth::checkAuthForUpdateUserCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -793,7 +794,7 @@ public:
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
int authzVersion;
- Status status = authzManager->getAuthorizationVersion(txn, &authzVersion);
+ Status status = authzManager->getAuthorizationVersion(opCtx, &authzVersion);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -815,11 +816,11 @@ public:
updateSetBuilder.append("roles", rolesVectorToBSONArray(args.roles));
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -830,7 +831,7 @@ public:
for (size_t i = 0; i < args.roles.size(); ++i) {
BSONObj ignored;
status = authzManager->getRoleDescription(
- txn, args.roles[i], PrivilegeFormat::kOmit, &ignored);
+ opCtx, args.roles[i], PrivilegeFormat::kOmit, &ignored);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -844,7 +845,7 @@ public:
args.hasRoles ? &args.roles : NULL);
status =
- updatePrivilegeDocument(txn, args.userName, BSON("$set" << updateSetBuilder.done()));
+ updatePrivilegeDocument(opCtx, args.userName, BSON("$set" << updateSetBuilder.done()));
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserByName(args.userName);
return appendCommandStatus(result, status);
@@ -878,7 +879,7 @@ public:
return auth::checkAuthForDropUserCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -890,10 +891,10 @@ public:
return appendCommandStatus(result, status);
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -901,7 +902,7 @@ public:
audit::logDropUser(Client::getCurrent(), userName);
long long nMatched;
- status = removePrivilegeDocuments(txn,
+ status = removePrivilegeDocuments(opCtx,
BSON(AuthorizationManager::USER_NAME_FIELD_NAME
<< userName.getUser()
<< AuthorizationManager::USER_DB_FIELD_NAME
@@ -947,7 +948,7 @@ public:
return auth::checkAuthForDropAllUsersFromDatabaseCommand(client, dbname);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -957,11 +958,11 @@ public:
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -970,7 +971,7 @@ public:
long long numRemoved;
status = removePrivilegeDocuments(
- txn, BSON(AuthorizationManager::USER_DB_FIELD_NAME << dbname), &numRemoved);
+ opCtx, BSON(AuthorizationManager::USER_DB_FIELD_NAME << dbname), &numRemoved);
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUsersFromDB(dbname);
if (!status.isOK()) {
@@ -1005,7 +1006,7 @@ public:
return auth::checkAuthForGrantRolesToUserCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -1019,18 +1020,18 @@ public:
return appendCommandStatus(result, status);
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
UserName userName(userNameString, dbname);
unordered_set<RoleName> userRoles;
- status = getCurrentUserRoles(txn, authzManager, userName, &userRoles);
+ status = getCurrentUserRoles(opCtx, authzManager, userName, &userRoles);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1039,7 +1040,7 @@ public:
RoleName& roleName = *it;
BSONObj roleDoc;
status =
- authzManager->getRoleDescription(txn, roleName, PrivilegeFormat::kOmit, &roleDoc);
+ authzManager->getRoleDescription(opCtx, roleName, PrivilegeFormat::kOmit, &roleDoc);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1050,7 +1051,7 @@ public:
audit::logGrantRolesToUser(Client::getCurrent(), userName, roles);
BSONArray newRolesBSONArray = roleSetToBSONArray(userRoles);
status = updatePrivilegeDocument(
- txn, userName, BSON("$set" << BSON("roles" << newRolesBSONArray)));
+ opCtx, userName, BSON("$set" << BSON("roles" << newRolesBSONArray)));
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserByName(userName);
return appendCommandStatus(result, status);
@@ -1080,7 +1081,7 @@ public:
return auth::checkAuthForRevokeRolesFromUserCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -1094,18 +1095,18 @@ public:
return appendCommandStatus(result, status);
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
UserName userName(userNameString, dbname);
unordered_set<RoleName> userRoles;
- status = getCurrentUserRoles(txn, authzManager, userName, &userRoles);
+ status = getCurrentUserRoles(opCtx, authzManager, userName, &userRoles);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1114,7 +1115,7 @@ public:
RoleName& roleName = *it;
BSONObj roleDoc;
status =
- authzManager->getRoleDescription(txn, roleName, PrivilegeFormat::kOmit, &roleDoc);
+ authzManager->getRoleDescription(opCtx, roleName, PrivilegeFormat::kOmit, &roleDoc);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1125,7 +1126,7 @@ public:
audit::logRevokeRolesFromUser(Client::getCurrent(), userName, roles);
BSONArray newRolesBSONArray = roleSetToBSONArray(userRoles);
status = updatePrivilegeDocument(
- txn, userName, BSON("$set" << BSON("roles" << newRolesBSONArray)));
+ opCtx, userName, BSON("$set" << BSON("roles" << newRolesBSONArray)));
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserByName(userName);
return appendCommandStatus(result, status);
@@ -1159,7 +1160,7 @@ public:
return auth::checkAuthForUsersInfoCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -1171,7 +1172,7 @@ public:
return appendCommandStatus(result, status);
}
- status = requireAuthSchemaVersion26UpgradeOrFinal(txn, getGlobalAuthorizationManager());
+ status = requireAuthSchemaVersion26UpgradeOrFinal(opCtx, getGlobalAuthorizationManager());
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1190,7 +1191,7 @@ public:
for (size_t i = 0; i < args.userNames.size(); ++i) {
BSONObj userDetails;
status = getGlobalAuthorizationManager()->getUserDescription(
- txn, args.userNames[i], &userDetails);
+ opCtx, args.userNames[i], &userDetails);
if (status.code() == ErrorCodes::UserNotFound) {
continue;
}
@@ -1236,7 +1237,7 @@ public:
}
const stdx::function<void(const BSONObj&)> function = stdx::bind(
appendBSONObjToBSONArrayBuilder, &usersArrayBuilder, stdx::placeholders::_1);
- queryAuthzDocument(txn,
+ queryAuthzDocument(opCtx,
AuthorizationManager::usersCollectionNamespace,
queryBuilder.done(),
projection,
@@ -1270,7 +1271,7 @@ public:
return auth::checkAuthForCreateRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -1334,17 +1335,17 @@ public:
roleObjBuilder.append("roles", rolesVectorToBSONArray(args.roles));
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
// Role existence has to be checked after acquiring the update lock
- status = checkOkayToGrantRolesToRole(txn, args.roleName, args.roles, authzManager);
+ status = checkOkayToGrantRolesToRole(opCtx, args.roleName, args.roles, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1356,7 +1357,7 @@ public:
audit::logCreateRole(Client::getCurrent(), args.roleName, args.roles, args.privileges);
- status = insertRoleDocument(txn, roleObjBuilder.done());
+ status = insertRoleDocument(opCtx, roleObjBuilder.done());
return appendCommandStatus(result, status);
}
@@ -1384,7 +1385,7 @@ public:
return auth::checkAuthForUpdateRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -1418,25 +1419,25 @@ public:
updateSetBuilder.append("roles", rolesVectorToBSONArray(args.roles));
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
// Role existence has to be checked after acquiring the update lock
BSONObj ignored;
- status =
- authzManager->getRoleDescription(txn, args.roleName, PrivilegeFormat::kOmit, &ignored);
+ status = authzManager->getRoleDescription(
+ opCtx, args.roleName, PrivilegeFormat::kOmit, &ignored);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
if (args.hasRoles) {
- status = checkOkayToGrantRolesToRole(txn, args.roleName, args.roles, authzManager);
+ status = checkOkayToGrantRolesToRole(opCtx, args.roleName, args.roles, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1454,7 +1455,7 @@ public:
args.hasRoles ? &args.roles : NULL,
args.hasPrivileges ? &args.privileges : NULL);
- status = updateRoleDocument(txn, args.roleName, BSON("$set" << updateSetBuilder.done()));
+ status = updateRoleDocument(opCtx, args.roleName, BSON("$set" << updateSetBuilder.done()));
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
return appendCommandStatus(result, status);
@@ -1483,7 +1484,7 @@ public:
return auth::checkAuthForGrantPrivilegesToRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -1498,11 +1499,11 @@ public:
return appendCommandStatus(result, status);
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1522,7 +1523,7 @@ public:
BSONObj roleDoc;
status = authzManager->getRoleDescription(
- txn, roleName, PrivilegeFormat::kShowSeparate, &roleDoc);
+ opCtx, roleName, PrivilegeFormat::kShowSeparate, &roleDoc);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1562,7 +1563,7 @@ public:
audit::logGrantPrivilegesToRole(Client::getCurrent(), roleName, privilegesToAdd);
- status = updateRoleDocument(txn, roleName, updateBSONBuilder.done());
+ status = updateRoleDocument(opCtx, roleName, updateBSONBuilder.done());
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
return appendCommandStatus(result, status);
@@ -1592,7 +1593,7 @@ public:
return auth::checkAuthForRevokePrivilegesFromRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -1606,11 +1607,11 @@ public:
return appendCommandStatus(result, status);
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1625,7 +1626,7 @@ public:
BSONObj roleDoc;
status = authzManager->getRoleDescription(
- txn, roleName, PrivilegeFormat::kShowSeparate, &roleDoc);
+ opCtx, roleName, PrivilegeFormat::kShowSeparate, &roleDoc);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1673,7 +1674,7 @@ public:
BSONObjBuilder updateBSONBuilder;
updateObj.writeTo(&updateBSONBuilder);
- status = updateRoleDocument(txn, roleName, updateBSONBuilder.done());
+ status = updateRoleDocument(opCtx, roleName, updateBSONBuilder.done());
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
return appendCommandStatus(result, status);
@@ -1703,7 +1704,7 @@ public:
return auth::checkAuthForGrantRolesToRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -1726,24 +1727,25 @@ public:
<< " is a built-in role and cannot be modified."));
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
// Role existence has to be checked after acquiring the update lock
BSONObj roleDoc;
- status = authzManager->getRoleDescription(txn, roleName, PrivilegeFormat::kOmit, &roleDoc);
+ status =
+ authzManager->getRoleDescription(opCtx, roleName, PrivilegeFormat::kOmit, &roleDoc);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
// Check for cycles
- status = checkOkayToGrantRolesToRole(txn, roleName, rolesToAdd, authzManager);
+ status = checkOkayToGrantRolesToRole(opCtx, roleName, rolesToAdd, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1764,7 +1766,7 @@ public:
audit::logGrantRolesToRole(Client::getCurrent(), roleName, rolesToAdd);
status = updateRoleDocument(
- txn, roleName, BSON("$set" << BSON("roles" << rolesVectorToBSONArray(directRoles))));
+ opCtx, roleName, BSON("$set" << BSON("roles" << rolesVectorToBSONArray(directRoles))));
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
return appendCommandStatus(result, status);
@@ -1794,7 +1796,7 @@ public:
return auth::checkAuthForRevokeRolesFromRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -1808,11 +1810,11 @@ public:
return appendCommandStatus(result, status);
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1827,7 +1829,8 @@ public:
}
BSONObj roleDoc;
- status = authzManager->getRoleDescription(txn, roleName, PrivilegeFormat::kOmit, &roleDoc);
+ status =
+ authzManager->getRoleDescription(opCtx, roleName, PrivilegeFormat::kOmit, &roleDoc);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1850,7 +1853,7 @@ public:
audit::logRevokeRolesFromRole(Client::getCurrent(), roleName, rolesToRemove);
status = updateRoleDocument(
- txn, roleName, BSON("$set" << BSON("roles" << rolesVectorToBSONArray(roles))));
+ opCtx, roleName, BSON("$set" << BSON("roles" << rolesVectorToBSONArray(roles))));
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
return appendCommandStatus(result, status);
@@ -1884,7 +1887,7 @@ public:
return auth::checkAuthForDropRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -1896,11 +1899,11 @@ public:
return appendCommandStatus(result, status);
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1914,7 +1917,8 @@ public:
}
BSONObj roleDoc;
- status = authzManager->getRoleDescription(txn, roleName, PrivilegeFormat::kOmit, &roleDoc);
+ status =
+ authzManager->getRoleDescription(opCtx, roleName, PrivilegeFormat::kOmit, &roleDoc);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1922,7 +1926,7 @@ public:
// Remove this role from all users
long long nMatched;
status = updateAuthzDocuments(
- txn,
+ opCtx,
AuthorizationManager::usersCollectionNamespace,
BSON("roles" << BSON("$elemMatch" << BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
<< roleName.getRole()
@@ -1951,7 +1955,7 @@ public:
// Remove this role from all other roles
status = updateAuthzDocuments(
- txn,
+ opCtx,
AuthorizationManager::rolesCollectionNamespace,
BSON("roles" << BSON("$elemMatch" << BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
<< roleName.getRole()
@@ -1980,7 +1984,7 @@ public:
audit::logDropRole(Client::getCurrent(), roleName);
// Finally, remove the actual role document
- status = removeRoleDocuments(txn,
+ status = removeRoleDocuments(opCtx,
BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
<< roleName.getRole()
<< AuthorizationManager::ROLE_DB_FIELD_NAME
@@ -2038,7 +2042,7 @@ public:
return auth::checkAuthForDropAllRolesFromDatabaseCommand(client, dbname);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -2049,11 +2053,11 @@ public:
return appendCommandStatus(result, status);
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -2061,7 +2065,7 @@ public:
// Remove these roles from all users
long long nMatched;
status = updateAuthzDocuments(
- txn,
+ opCtx,
AuthorizationManager::usersCollectionNamespace,
BSON("roles" << BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << dbname)),
BSON("$pull" << BSON("roles"
@@ -2087,7 +2091,7 @@ public:
std::string sourceFieldName = str::stream() << "roles."
<< AuthorizationManager::ROLE_DB_FIELD_NAME;
status = updateAuthzDocuments(
- txn,
+ opCtx,
AuthorizationManager::rolesCollectionNamespace,
BSON(sourceFieldName << dbname),
BSON("$pull" << BSON("roles"
@@ -2112,7 +2116,7 @@ public:
audit::logDropAllRolesFromDatabase(Client::getCurrent(), dbname);
// Finally, remove the actual role documents
status = removeRoleDocuments(
- txn, BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << dbname), &nMatched);
+ opCtx, BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << dbname), &nMatched);
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserCache();
if (!status.isOK()) {
@@ -2182,7 +2186,7 @@ public:
return auth::checkAuthForRolesInfoCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -2194,7 +2198,7 @@ public:
return appendCommandStatus(result, status);
}
- status = requireAuthSchemaVersion26UpgradeOrFinal(txn, getGlobalAuthorizationManager());
+ status = requireAuthSchemaVersion26UpgradeOrFinal(opCtx, getGlobalAuthorizationManager());
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -2202,7 +2206,7 @@ public:
if (args.allForDB) {
std::vector<BSONObj> rolesDocs;
status = getGlobalAuthorizationManager()->getRoleDescriptionsForDB(
- txn, dbname, args.privilegeFormat, args.showBuiltinRoles, &rolesDocs);
+ opCtx, dbname, args.privilegeFormat, args.showBuiltinRoles, &rolesDocs);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -2221,7 +2225,7 @@ public:
} else {
BSONObj roleDetails;
status = getGlobalAuthorizationManager()->getRolesDescription(
- txn, args.roleNames, args.privilegeFormat, &roleDetails);
+ opCtx, args.roleNames, args.privilegeFormat, &roleDetails);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -2264,7 +2268,7 @@ public:
return auth::checkAuthForInvalidateUserCacheCommand(client);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -2303,7 +2307,7 @@ public:
return auth::checkAuthForGetUserCacheGenerationCommand(client);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -2451,7 +2455,7 @@ public:
* admin.system.users collection.
* Also removes any users it encounters from the usersToDrop set.
*/
- static void addUser(OperationContext* txn,
+ static void addUser(OperationContext* opCtx,
AuthorizationManager* authzManager,
StringData db,
bool update,
@@ -2464,7 +2468,7 @@ public:
if (update && usersToDrop->count(userName)) {
auditCreateOrUpdateUser(userObj, false);
- Status status = updatePrivilegeDocument(txn, userName, userObj);
+ Status status = updatePrivilegeDocument(opCtx, userName, userObj);
if (!status.isOK()) {
// Match the behavior of mongorestore to continue on failure
warning() << "Could not update user " << userName
@@ -2472,7 +2476,7 @@ public:
}
} else {
auditCreateOrUpdateUser(userObj, true);
- Status status = insertPrivilegeDocument(txn, userObj);
+ Status status = insertPrivilegeDocument(opCtx, userObj);
if (!status.isOK()) {
// Match the behavior of mongorestore to continue on failure
warning() << "Could not insert user " << userName
@@ -2489,7 +2493,7 @@ public:
* admin.system.roles collection.
* Also removes any roles it encounters from the rolesToDrop set.
*/
- static void addRole(OperationContext* txn,
+ static void addRole(OperationContext* opCtx,
AuthorizationManager* authzManager,
StringData db,
bool update,
@@ -2502,7 +2506,7 @@ public:
if (update && rolesToDrop->count(roleName)) {
auditCreateOrUpdateRole(roleObj, false);
- Status status = updateRoleDocument(txn, roleName, roleObj);
+ Status status = updateRoleDocument(opCtx, roleName, roleObj);
if (!status.isOK()) {
// Match the behavior of mongorestore to continue on failure
warning() << "Could not update role " << roleName
@@ -2510,7 +2514,7 @@ public:
}
} else {
auditCreateOrUpdateRole(roleObj, true);
- Status status = insertRoleDocument(txn, roleObj);
+ Status status = insertRoleDocument(opCtx, roleObj);
if (!status.isOK()) {
// Match the behavior of mongorestore to continue on failure
warning() << "Could not insert role " << roleName
@@ -2524,7 +2528,7 @@ public:
* Moves all user objects from usersCollName into admin.system.users. If drop is true,
* removes any users that were in admin.system.users but not in usersCollName.
*/
- Status processUsers(OperationContext* txn,
+ Status processUsers(OperationContext* opCtx,
AuthorizationManager* authzManager,
StringData usersCollName,
StringData db,
@@ -2550,7 +2554,7 @@ public:
<< 1);
Status status =
- queryAuthzDocument(txn,
+ queryAuthzDocument(opCtx,
AuthorizationManager::usersCollectionNamespace,
query,
fields,
@@ -2563,12 +2567,12 @@ public:
}
Status status = queryAuthzDocument(
- txn,
+ opCtx,
NamespaceString(usersCollName),
db.empty() ? BSONObj() : BSON(AuthorizationManager::USER_DB_FIELD_NAME << db),
BSONObj(),
stdx::bind(&CmdMergeAuthzCollections::addUser,
- txn,
+ opCtx,
authzManager,
db,
drop,
@@ -2585,7 +2589,7 @@ public:
++it) {
const UserName& userName = *it;
audit::logDropUser(Client::getCurrent(), userName);
- status = removePrivilegeDocuments(txn,
+ status = removePrivilegeDocuments(opCtx,
BSON(AuthorizationManager::USER_NAME_FIELD_NAME
<< userName.getUser().toString()
<< AuthorizationManager::USER_DB_FIELD_NAME
@@ -2605,7 +2609,7 @@ public:
* Moves all user objects from usersCollName into admin.system.users. If drop is true,
* removes any users that were in admin.system.users but not in usersCollName.
*/
- Status processRoles(OperationContext* txn,
+ Status processRoles(OperationContext* opCtx,
AuthorizationManager* authzManager,
StringData rolesCollName,
StringData db,
@@ -2630,7 +2634,7 @@ public:
<< 1);
Status status =
- queryAuthzDocument(txn,
+ queryAuthzDocument(opCtx,
AuthorizationManager::rolesCollectionNamespace,
query,
fields,
@@ -2643,12 +2647,12 @@ public:
}
Status status = queryAuthzDocument(
- txn,
+ opCtx,
NamespaceString(rolesCollName),
db.empty() ? BSONObj() : BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << db),
BSONObj(),
stdx::bind(&CmdMergeAuthzCollections::addRole,
- txn,
+ opCtx,
authzManager,
db,
drop,
@@ -2665,7 +2669,7 @@ public:
++it) {
const RoleName& roleName = *it;
audit::logDropRole(Client::getCurrent(), roleName);
- status = removeRoleDocuments(txn,
+ status = removeRoleDocuments(opCtx,
BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
<< roleName.getRole().toString()
<< AuthorizationManager::ROLE_DB_FIELD_NAME
@@ -2681,7 +2685,7 @@ public:
return Status::OK();
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -2701,24 +2705,26 @@ public:
"\"tempRolescollection\""));
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(opCtx, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
if (!args.usersCollName.empty()) {
- Status status = processUsers(txn, authzManager, args.usersCollName, args.db, args.drop);
+ Status status =
+ processUsers(opCtx, authzManager, args.usersCollName, args.db, args.drop);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
}
if (!args.rolesCollName.empty()) {
- Status status = processRoles(txn, authzManager, args.rolesCollName, args.db, args.drop);
+ Status status =
+ processRoles(opCtx, authzManager, args.rolesCollName, args.db, args.drop);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -2742,7 +2748,7 @@ Status logUpgradeFailed(const Status& status) {
*
* Throws a DBException on errors.
*/
-void updateUserCredentials(OperationContext* txn,
+void updateUserCredentials(OperationContext* opCtx,
const StringData& sourceDB,
const BSONObj& userDoc) {
// Skip users in $external, SERVER-18475
@@ -2789,7 +2795,7 @@ void updateUserCredentials(OperationContext* txn,
}
uassertStatusOK(updateOneAuthzDocument(
- txn, NamespaceString("admin", "system.users"), query, updateBuilder.obj(), true));
+ opCtx, NamespaceString("admin", "system.users"), query, updateBuilder.obj(), true));
}
/** Loop through all the user documents in the admin.system.users collection.
@@ -2798,20 +2804,20 @@ void updateUserCredentials(OperationContext* txn,
* 2. Remove the MONGODB-CR hash
* 3. Add SCRAM credentials to the user document credentials section
*/
-Status updateCredentials(OperationContext* txn) {
+Status updateCredentials(OperationContext* opCtx) {
// Loop through and update the user documents in admin.system.users.
- Status status =
- queryAuthzDocument(txn,
- NamespaceString("admin", "system.users"),
- BSONObj(),
- BSONObj(),
- stdx::bind(updateUserCredentials, txn, "admin", stdx::placeholders::_1));
+ Status status = queryAuthzDocument(
+ opCtx,
+ NamespaceString("admin", "system.users"),
+ BSONObj(),
+ BSONObj(),
+ stdx::bind(updateUserCredentials, opCtx, "admin", stdx::placeholders::_1));
if (!status.isOK())
return logUpgradeFailed(status);
// Update the schema version document.
status =
- updateOneAuthzDocument(txn,
+ updateOneAuthzDocument(opCtx,
AuthorizationManager::versionCollectionNamespace,
AuthorizationManager::versionDocumentQuery,
BSON("$set" << BSON(AuthorizationManager::schemaVersionFieldName
@@ -2836,11 +2842,11 @@ Status updateCredentials(OperationContext* txn) {
* On failure, returns a status other than Status::OK(). In this case, is is typically safe
* to try again.
*/
-Status upgradeAuthSchemaStep(OperationContext* txn,
+Status upgradeAuthSchemaStep(OperationContext* opCtx,
AuthorizationManager* authzManager,
bool* isDone) {
int authzVersion;
- Status status = authzManager->getAuthorizationVersion(txn, &authzVersion);
+ Status status = authzManager->getAuthorizationVersion(opCtx, &authzVersion);
if (!status.isOK()) {
return status;
}
@@ -2848,7 +2854,7 @@ Status upgradeAuthSchemaStep(OperationContext* txn,
switch (authzVersion) {
case AuthorizationManager::schemaVersion26Final:
case AuthorizationManager::schemaVersion28SCRAM: {
- Status status = updateCredentials(txn);
+ Status status = updateCredentials(opCtx);
if (status.isOK())
*isDone = true;
return status;
@@ -2874,7 +2880,9 @@ Status upgradeAuthSchemaStep(OperationContext* txn,
* progress performing the upgrade, and the specific code and message in the returned status
* may provide additional information.
*/
-Status upgradeAuthSchema(OperationContext* txn, AuthorizationManager* authzManager, int maxSteps) {
+Status upgradeAuthSchema(OperationContext* opCtx,
+ AuthorizationManager* authzManager,
+ int maxSteps) {
if (maxSteps < 1) {
return Status(ErrorCodes::BadValue,
"Minimum value for maxSteps parameter to upgradeAuthSchema is 1");
@@ -2882,7 +2890,7 @@ Status upgradeAuthSchema(OperationContext* txn, AuthorizationManager* authzManag
authzManager->invalidateUserCache();
for (int i = 0; i < maxSteps; ++i) {
bool isDone;
- Status status = upgradeAuthSchemaStep(txn, authzManager, &isDone);
+ Status status = upgradeAuthSchemaStep(opCtx, authzManager, &isDone);
authzManager->invalidateUserCache();
if (!status.isOK() || isDone) {
return status;
@@ -2919,7 +2927,7 @@ public:
return auth::checkAuthForAuthSchemaUpgradeCommand(client);
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -2931,12 +2939,12 @@ public:
return appendCommandStatus(result, status);
}
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
- status = upgradeAuthSchema(txn, authzManager, parsedArgs.maxSteps);
+ status = upgradeAuthSchema(opCtx, authzManager, parsedArgs.maxSteps);
if (status.isOK())
result.append("done", true);
return appendCommandStatus(result, status);
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index 02c577da9c8..2fc05974986 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -75,7 +75,7 @@ public:
}
//{ validate: "collectionnamewithoutthedbpart" [, scandata: <bool>] [, full: <bool> } */
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -109,11 +109,11 @@ public:
LOG(0) << "CMD: validate " << nss.ns();
}
- AutoGetDb ctx(txn, nss.db(), MODE_IX);
- Lock::CollectionLock collLk(txn->lockState(), nss.ns(), MODE_X);
+ AutoGetDb ctx(opCtx, nss.db(), MODE_IX);
+ Lock::CollectionLock collLk(opCtx->lockState(), nss.ns(), MODE_X);
Collection* collection = ctx.getDb() ? ctx.getDb()->getCollection(nss) : NULL;
if (!collection) {
- if (ctx.getDb() && ctx.getDb()->getViewCatalog()->lookup(txn, nss.ns())) {
+ if (ctx.getDb() && ctx.getDb()->getViewCatalog()->lookup(opCtx, nss.ns())) {
errmsg = "Cannot validate a view";
return appendCommandStatus(result, {ErrorCodes::CommandNotSupportedOnView, errmsg});
}
@@ -125,7 +125,7 @@ public:
result.append("ns", nss.ns());
ValidateResults results;
- Status status = collection->validate(txn, level, &results, &result);
+ Status status = collection->validate(opCtx, level, &results, &result);
if (!status.isOK())
return appendCommandStatus(result, status);
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index 506709096b8..4093bcf083e 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -87,21 +87,21 @@ Status checkAuthForWriteCommand(Client* client,
return status;
}
-bool shouldSkipOutput(OperationContext* txn) {
- const WriteConcernOptions& writeConcern = txn->getWriteConcern();
+bool shouldSkipOutput(OperationContext* opCtx) {
+ const WriteConcernOptions& writeConcern = opCtx->getWriteConcern();
return writeConcern.wMode.empty() && writeConcern.wNumNodes == 0 &&
(writeConcern.syncMode == WriteConcernOptions::SyncMode::NONE ||
writeConcern.syncMode == WriteConcernOptions::SyncMode::UNSET);
}
enum class ReplyStyle { kUpdate, kNotUpdate }; // update has extra fields.
-void serializeReply(OperationContext* txn,
+void serializeReply(OperationContext* opCtx,
ReplyStyle replyStyle,
bool continueOnError,
size_t opsInBatch,
const WriteResult& result,
BSONObjBuilder* out) {
- if (shouldSkipOutput(txn))
+ if (shouldSkipOutput(opCtx))
return;
long long n = 0;
@@ -170,10 +170,10 @@ void serializeReply(OperationContext* txn,
{
// Undocumented repl fields that mongos depends on.
- auto* replCoord = repl::ReplicationCoordinator::get(txn->getServiceContext());
+ auto* replCoord = repl::ReplicationCoordinator::get(opCtx->getServiceContext());
const auto replMode = replCoord->getReplicationMode();
if (replMode != repl::ReplicationCoordinator::modeNone) {
- const auto lastOp = repl::ReplClientInfo::forClient(txn->getClient()).getLastOp();
+ const auto lastOp = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
if (lastOp.getTerm() == repl::OpTime::kUninitializedTerm) {
out->append("opTime", lastOp.getTimestamp());
} else {
@@ -207,22 +207,22 @@ public:
return ReadWriteType::kWrite;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
std::string& errmsg,
BSONObjBuilder& result) final {
try {
- runImpl(txn, dbname, cmdObj, result);
+ runImpl(opCtx, dbname, cmdObj, result);
return true;
} catch (const DBException& ex) {
- LastError::get(txn->getClient()).setLastError(ex.getCode(), ex.getInfo().msg);
+ LastError::get(opCtx->getClient()).setLastError(ex.getCode(), ex.getInfo().msg);
throw;
}
}
- virtual void runImpl(OperationContext* txn,
+ virtual void runImpl(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) = 0;
@@ -251,13 +251,13 @@ public:
cmdObj);
}
- void runImpl(OperationContext* txn,
+ void runImpl(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) final {
const auto batch = parseInsertCommand(dbname, cmdObj);
- const auto reply = performInserts(txn, batch);
- serializeReply(txn,
+ const auto reply = performInserts(opCtx, batch);
+ serializeReply(opCtx,
ReplyStyle::kNotUpdate,
batch.continueOnError,
batch.documents.size(),
@@ -287,17 +287,21 @@ public:
cmdObj);
}
- void runImpl(OperationContext* txn,
+ void runImpl(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) final {
const auto batch = parseUpdateCommand(dbname, cmdObj);
- const auto reply = performUpdates(txn, batch);
- serializeReply(
- txn, ReplyStyle::kUpdate, batch.continueOnError, batch.updates.size(), reply, &result);
+ const auto reply = performUpdates(opCtx, batch);
+ serializeReply(opCtx,
+ ReplyStyle::kUpdate,
+ batch.continueOnError,
+ batch.updates.size(),
+ reply,
+ &result);
}
- Status explain(OperationContext* txn,
+ Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -319,16 +323,16 @@ public:
updateRequest.setYieldPolicy(PlanExecutor::YIELD_AUTO);
updateRequest.setExplain();
- ParsedUpdate parsedUpdate(txn, &updateRequest);
+ ParsedUpdate parsedUpdate(opCtx, &updateRequest);
uassertStatusOK(parsedUpdate.parseRequest());
// Explains of write commands are read-only, but we take write locks so that timing
// info is more accurate.
- ScopedTransaction scopedXact(txn, MODE_IX);
- AutoGetCollection collection(txn, batch.ns, MODE_IX);
+ ScopedTransaction scopedXact(opCtx, MODE_IX);
+ AutoGetCollection collection(opCtx, batch.ns, MODE_IX);
auto exec = uassertStatusOK(getExecutorUpdate(
- txn, &CurOp::get(txn)->debug(), collection.getCollection(), &parsedUpdate));
+ opCtx, &CurOp::get(opCtx)->debug(), collection.getCollection(), &parsedUpdate));
Explain::explainStages(exec.get(), collection.getCollection(), verbosity, out);
return Status::OK();
}
@@ -355,13 +359,13 @@ public:
cmdObj);
}
- void runImpl(OperationContext* txn,
+ void runImpl(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) final {
const auto batch = parseDeleteCommand(dbname, cmdObj);
- const auto reply = performDeletes(txn, batch);
- serializeReply(txn,
+ const auto reply = performDeletes(opCtx, batch);
+ serializeReply(opCtx,
ReplyStyle::kNotUpdate,
batch.continueOnError,
batch.deletes.size(),
@@ -369,7 +373,7 @@ public:
&result);
}
- Status explain(OperationContext* txn,
+ Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -387,17 +391,17 @@ public:
deleteRequest.setYieldPolicy(PlanExecutor::YIELD_AUTO);
deleteRequest.setExplain();
- ParsedDelete parsedDelete(txn, &deleteRequest);
+ ParsedDelete parsedDelete(opCtx, &deleteRequest);
uassertStatusOK(parsedDelete.parseRequest());
// Explains of write commands are read-only, but we take write locks so that timing
// info is more accurate.
- ScopedTransaction scopedXact(txn, MODE_IX);
- AutoGetCollection collection(txn, batch.ns, MODE_IX);
+ ScopedTransaction scopedXact(opCtx, MODE_IX);
+ AutoGetCollection collection(opCtx, batch.ns, MODE_IX);
// Explain the plan tree.
auto exec = uassertStatusOK(getExecutorDelete(
- txn, &CurOp::get(txn)->debug(), collection.getCollection(), &parsedDelete));
+ opCtx, &CurOp::get(opCtx)->debug(), collection.getCollection(), &parsedDelete));
Explain::explainStages(exec.get(), collection.getCollection(), verbosity, out);
return Status::OK();
}
diff --git a/src/mongo/db/commands_helpers.h b/src/mongo/db/commands_helpers.h
index d198e2323c0..0f559a3af86 100644
--- a/src/mongo/db/commands_helpers.h
+++ b/src/mongo/db/commands_helpers.h
@@ -45,7 +45,7 @@ class ReplyBuilderInterface;
// both members, and defined to be the same symbol.
// Implemented in `src/mongo/s/s_only.cpp`.
-void execCommandClient(OperationContext* txn,
+void execCommandClient(OperationContext* opCtx,
Command* c,
int queryOptions,
const char* ns,
@@ -53,7 +53,7 @@ void execCommandClient(OperationContext* txn,
BSONObjBuilder& result);
// Implemented in `src/mongo/db/commands/dbcommands.cpp`.
-void execCommandDatabase(OperationContext* txn,
+void execCommandDatabase(OperationContext* opCtx,
Command* command,
const rpc::RequestInterface& request,
rpc::ReplyBuilderInterface* replyBuilder);
diff --git a/src/mongo/db/concurrency/d_concurrency.h b/src/mongo/db/concurrency/d_concurrency.h
index fe8ac0f49a1..0d93b3ef2d0 100644
--- a/src/mongo/db/concurrency/d_concurrency.h
+++ b/src/mongo/db/concurrency/d_concurrency.h
@@ -358,5 +358,5 @@ public:
* WUOW. This ensures that a MODE_X lock on this resource will wait for all in-flight capped
* inserts to either commit or rollback and block new ones from starting.
*/
-void synchronizeOnCappedInFlightResource(Locker* txn, const NamespaceString& cappedNs);
+void synchronizeOnCappedInFlightResource(Locker* opCtx, const NamespaceString& cappedNs);
}
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 6dae80fbbb0..1e63c1b55bc 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -175,7 +175,7 @@ ntservice::NtServiceDefaultStrings defaultServiceStrings = {
Timer startupSrandTimer;
-void logStartup(OperationContext* txn) {
+void logStartup(OperationContext* opCtx) {
BSONObjBuilder toLog;
stringstream id;
id << getHostNameCached() << "-" << jsTime().asInt64();
@@ -196,28 +196,28 @@ void logStartup(OperationContext* txn) {
BSONObj o = toLog.obj();
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- AutoGetOrCreateDb autoDb(txn, startupLogCollectionName.db(), mongo::MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
+ AutoGetOrCreateDb autoDb(opCtx, startupLogCollectionName.db(), mongo::MODE_X);
Database* db = autoDb.getDb();
Collection* collection = db->getCollection(startupLogCollectionName);
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
if (!collection) {
BSONObj options = BSON("capped" << true << "size" << 10 * 1024 * 1024);
- bool shouldReplicateWrites = txn->writesAreReplicated();
- txn->setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, txn, shouldReplicateWrites);
- uassertStatusOK(userCreateNS(txn, db, startupLogCollectionName.ns(), options));
+ bool shouldReplicateWrites = opCtx->writesAreReplicated();
+ opCtx->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, opCtx, shouldReplicateWrites);
+ uassertStatusOK(userCreateNS(opCtx, db, startupLogCollectionName.ns(), options));
collection = db->getCollection(startupLogCollectionName);
}
invariant(collection);
OpDebug* const nullOpDebug = nullptr;
- uassertStatusOK(collection->insertDocument(txn, o, nullOpDebug, false));
+ uassertStatusOK(collection->insertDocument(opCtx, o, nullOpDebug, false));
wunit.commit();
}
-void checkForIdIndexes(OperationContext* txn, Database* db) {
+void checkForIdIndexes(OperationContext* opCtx, Database* db) {
if (db->name() == "local") {
// we do not need an _id index on anything in the local database
return;
@@ -237,7 +237,7 @@ void checkForIdIndexes(OperationContext* txn, Database* db) {
if (!coll)
continue;
- if (coll->getIndexCatalog()->findIdIndex(txn))
+ if (coll->getIndexCatalog()->findIdIndex(opCtx))
continue;
log() << "WARNING: the collection '" << *i << "' lacks a unique index on _id."
@@ -255,9 +255,9 @@ void checkForIdIndexes(OperationContext* txn, Database* db) {
* @returns the number of documents in local.system.replset or 0 if this was started with
* --replset.
*/
-unsigned long long checkIfReplMissingFromCommandLine(OperationContext* txn) {
+unsigned long long checkIfReplMissingFromCommandLine(OperationContext* opCtx) {
if (!repl::getGlobalReplicationCoordinator()->getSettings().usingReplSets()) {
- DBDirectClient c(txn);
+ DBDirectClient c(opCtx);
return c.count(kSystemReplSetCollection.ns());
}
return 0;
@@ -267,9 +267,9 @@ unsigned long long checkIfReplMissingFromCommandLine(OperationContext* txn) {
* Check that the oplog is capped, and abort the process if it is not.
* Caller must lock DB before calling this function.
*/
-void checkForCappedOplog(OperationContext* txn, Database* db) {
+void checkForCappedOplog(OperationContext* opCtx, Database* db) {
const NamespaceString oplogNss(repl::rsOplogName);
- invariant(txn->lockState()->isDbLockedForMode(oplogNss.db(), MODE_IS));
+ invariant(opCtx->lockState()->isDbLockedForMode(oplogNss.db(), MODE_IS));
Collection* oplogCollection = db->getCollection(oplogNss);
if (oplogCollection && !oplogCollection->isCapped()) {
severe() << "The oplog collection " << oplogNss
@@ -278,15 +278,15 @@ void checkForCappedOplog(OperationContext* txn, Database* db) {
}
}
-void repairDatabasesAndCheckVersion(OperationContext* txn) {
+void repairDatabasesAndCheckVersion(OperationContext* opCtx) {
LOG(1) << "enter repairDatabases (to check pdfile version #)";
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
vector<string> dbNames;
- StorageEngine* storageEngine = txn->getServiceContext()->getGlobalStorageEngine();
+ StorageEngine* storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine();
storageEngine->listDatabases(&dbNames);
// Repair all databases first, so that we do not try to open them if they are in bad shape
@@ -296,7 +296,7 @@ void repairDatabasesAndCheckVersion(OperationContext* txn) {
const string dbName = *i;
LOG(1) << " Repairing database: " << dbName;
- fassert(18506, repairDatabase(txn, storageEngine, dbName));
+ fassert(18506, repairDatabase(opCtx, storageEngine, dbName));
}
}
@@ -309,8 +309,8 @@ void repairDatabasesAndCheckVersion(OperationContext* txn) {
// yet, then it will be created. If the mongod is running in a read-only mode, then it is
// fine to not open the "local" database and populate the catalog entries because we won't
// attempt to drop the temporary collections anyway.
- Lock::DBLock dbLock(txn->lockState(), kSystemReplSetCollection.db(), MODE_X);
- dbHolder().openDb(txn, kSystemReplSetCollection.db());
+ Lock::DBLock dbLock(opCtx->lockState(), kSystemReplSetCollection.db(), MODE_X);
+ dbHolder().openDb(opCtx, kSystemReplSetCollection.db());
}
// On replica set members we only clear temp collections on DBs other than "local" during
@@ -318,19 +318,19 @@ void repairDatabasesAndCheckVersion(OperationContext* txn) {
// to. The local DB is special because it is not replicated. See SERVER-10927 for more
// details.
const bool shouldClearNonLocalTmpCollections =
- !(checkIfReplMissingFromCommandLine(txn) || replSettings.usingReplSets() ||
+ !(checkIfReplMissingFromCommandLine(opCtx) || replSettings.usingReplSets() ||
replSettings.isSlave());
for (vector<string>::const_iterator i = dbNames.begin(); i != dbNames.end(); ++i) {
const string dbName = *i;
LOG(1) << " Recovering database: " << dbName;
- Database* db = dbHolder().openDb(txn, dbName);
+ Database* db = dbHolder().openDb(opCtx, dbName);
invariant(db);
// First thing after opening the database is to check for file compatibility,
// otherwise we might crash if this is a deprecated format.
- auto status = db->getDatabaseCatalogEntry()->currentFilesCompatible(txn);
+ auto status = db->getDatabaseCatalogEntry()->currentFilesCompatible(opCtx);
if (!status.isOK()) {
if (status.code() == ErrorCodes::CanRepairToDowngrade) {
// Convert CanRepairToDowngrade statuses to MustUpgrade statuses to avoid logging a
@@ -355,7 +355,7 @@ void repairDatabasesAndCheckVersion(OperationContext* txn) {
if (Collection* versionColl =
db->getCollection(FeatureCompatibilityVersion::kCollection)) {
BSONObj featureCompatibilityVersion;
- if (Helpers::findOne(txn,
+ if (Helpers::findOne(opCtx,
versionColl,
BSON("_id" << FeatureCompatibilityVersion::kParameterName),
featureCompatibilityVersion)) {
@@ -373,8 +373,8 @@ void repairDatabasesAndCheckVersion(OperationContext* txn) {
const string systemIndexes = db->name() + ".system.indexes";
Collection* coll = db->getCollection(systemIndexes);
- unique_ptr<PlanExecutor> exec(
- InternalPlanner::collectionScan(txn, systemIndexes, coll, PlanExecutor::YIELD_MANUAL));
+ unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(
+ opCtx, systemIndexes, coll, PlanExecutor::YIELD_MANUAL));
BSONObj index;
PlanExecutor::ExecState state;
@@ -382,7 +382,7 @@ void repairDatabasesAndCheckVersion(OperationContext* txn) {
const BSONObj key = index.getObjectField("key");
const string plugin = IndexNames::findPluginName(key);
- if (db->getDatabaseCatalogEntry()->isOlderThan24(txn)) {
+ if (db->getDatabaseCatalogEntry()->isOlderThan24(opCtx)) {
if (IndexNames::existedBefore24(plugin)) {
continue;
}
@@ -408,17 +408,17 @@ void repairDatabasesAndCheckVersion(OperationContext* txn) {
if (replSettings.usingReplSets()) {
// We only care about the _id index if we are in a replset
- checkForIdIndexes(txn, db);
+ checkForIdIndexes(opCtx, db);
// Ensure oplog is capped (mmap does not guarantee order of inserts on noncapped
// collections)
if (db->name() == "local") {
- checkForCappedOplog(txn, db);
+ checkForCappedOplog(opCtx, db);
}
}
if (!storageGlobalParams.readOnly &&
(shouldClearNonLocalTmpCollections || dbName == "local")) {
- db->clearTmpCollections(txn);
+ db->clearTmpCollections(opCtx);
}
}
@@ -443,8 +443,8 @@ ExitCode _initAndListen(int listenPort) {
globalServiceContext->setOpObserver(stdx::make_unique<OpObserverImpl>());
DBDirectClientFactory::get(globalServiceContext)
- .registerImplementation([](OperationContext* txn) {
- return std::unique_ptr<DBClientBase>(new DBDirectClient(txn));
+ .registerImplementation([](OperationContext* opCtx) {
+ return std::unique_ptr<DBClientBase>(new DBDirectClient(opCtx));
});
const repl::ReplSettings& replSettings = repl::getGlobalReplicationCoordinator()->getSettings();
@@ -940,10 +940,10 @@ static void shutdownTask() {
Client& client = cc();
ServiceContext::UniqueOperationContext uniqueTxn;
- OperationContext* txn = client.getOperationContext();
- if (!txn && serviceContext->getGlobalStorageEngine()) {
+ OperationContext* opCtx = client.getOperationContext();
+ if (!opCtx && serviceContext->getGlobalStorageEngine()) {
uniqueTxn = client.makeOperationContext();
- txn = uniqueTxn.get();
+ opCtx = uniqueTxn.get();
}
log(LogComponent::kNetwork) << "shutdown: going to close listening sockets..." << endl;
@@ -952,10 +952,10 @@ static void shutdownTask() {
log(LogComponent::kNetwork) << "shutdown: going to flush diaglog..." << endl;
_diaglog.flush();
- if (txn) {
+ if (opCtx) {
// This can wait a long time while we drain the secondary's apply queue, especially if it is
// building an index.
- repl::ReplicationCoordinator::get(txn)->shutdown(txn);
+ repl::ReplicationCoordinator::get(opCtx)->shutdown(opCtx);
}
if (serviceContext)
@@ -1012,8 +1012,8 @@ static void shutdownTask() {
// Shutdown Full-Time Data Capture
stopFTDC();
- if (txn) {
- ShardingState::get(txn)->shutDown(txn);
+ if (opCtx) {
+ ShardingState::get(opCtx)->shutDown(opCtx);
}
// We should always be able to acquire the global lock at shutdown.
diff --git a/src/mongo/db/db_raii.cpp b/src/mongo/db/db_raii.cpp
index 2808c7e0e35..051d9141ebf 100644
--- a/src/mongo/db/db_raii.cpp
+++ b/src/mongo/db/db_raii.cpp
@@ -41,30 +41,30 @@
namespace mongo {
-AutoGetDb::AutoGetDb(OperationContext* txn, StringData ns, LockMode mode)
- : _dbLock(txn->lockState(), ns, mode), _db(dbHolder().get(txn, ns)) {}
+AutoGetDb::AutoGetDb(OperationContext* opCtx, StringData ns, LockMode mode)
+ : _dbLock(opCtx->lockState(), ns, mode), _db(dbHolder().get(opCtx, ns)) {}
-AutoGetCollection::AutoGetCollection(OperationContext* txn,
+AutoGetCollection::AutoGetCollection(OperationContext* opCtx,
const NamespaceString& nss,
LockMode modeDB,
LockMode modeColl,
ViewMode viewMode)
: _viewMode(viewMode),
- _autoDb(txn, nss.db(), modeDB),
- _collLock(txn->lockState(), nss.ns(), modeColl),
+ _autoDb(opCtx, nss.db(), modeDB),
+ _collLock(opCtx->lockState(), nss.ns(), modeColl),
_coll(_autoDb.getDb() ? _autoDb.getDb()->getCollection(nss) : nullptr) {
Database* db = _autoDb.getDb();
// If the database exists, but not the collection, check for views.
if (_viewMode == ViewMode::kViewsForbidden && db && !_coll &&
- db->getViewCatalog()->lookup(txn, nss.ns()))
+ db->getViewCatalog()->lookup(opCtx, nss.ns()))
uasserted(ErrorCodes::CommandNotSupportedOnView,
str::stream() << "Namespace " << nss.ns() << " is a view, not a collection");
}
-AutoGetOrCreateDb::AutoGetOrCreateDb(OperationContext* txn, StringData ns, LockMode mode)
- : _transaction(txn, MODE_IX),
- _dbLock(txn->lockState(), ns, mode),
- _db(dbHolder().get(txn, ns)) {
+AutoGetOrCreateDb::AutoGetOrCreateDb(OperationContext* opCtx, StringData ns, LockMode mode)
+ : _transaction(opCtx, MODE_IX),
+ _dbLock(opCtx->lockState(), ns, mode),
+ _db(dbHolder().get(opCtx, ns)) {
invariant(mode == MODE_IX || mode == MODE_X);
_justCreated = false;
// If the database didn't exist, relock in MODE_X
@@ -72,20 +72,20 @@ AutoGetOrCreateDb::AutoGetOrCreateDb(OperationContext* txn, StringData ns, LockM
if (mode != MODE_X) {
_dbLock.relockWithMode(MODE_X);
}
- _db = dbHolder().openDb(txn, ns);
+ _db = dbHolder().openDb(opCtx, ns);
_justCreated = true;
}
}
-AutoGetCollectionForRead::AutoGetCollectionForRead(OperationContext* txn,
+AutoGetCollectionForRead::AutoGetCollectionForRead(OperationContext* opCtx,
const NamespaceString& nss,
AutoGetCollection::ViewMode viewMode)
- : _txn(txn), _transaction(txn, MODE_IS) {
+ : _opCtx(opCtx), _transaction(opCtx, MODE_IS) {
{
- _autoColl.emplace(txn, nss, MODE_IS, MODE_IS, viewMode);
+ _autoColl.emplace(opCtx, nss, MODE_IS, MODE_IS, viewMode);
- auto curOp = CurOp::get(_txn);
- stdx::lock_guard<Client> lk(*_txn->getClient());
+ auto curOp = CurOp::get(_opCtx);
+ stdx::lock_guard<Client> lk(*_opCtx->getClient());
// TODO: OldClientContext legacy, needs to be removed
curOp->ensureStarted();
@@ -104,15 +104,15 @@ AutoGetCollectionForRead::AutoGetCollectionForRead(OperationContext* txn,
// We have both the DB and collection locked, which is the prerequisite to do a stable shard
// version check, but we'd like to do the check after we have a satisfactory snapshot.
- auto css = CollectionShardingState::get(txn, nss);
- css->checkShardVersionOrThrow(txn);
+ auto css = CollectionShardingState::get(opCtx, nss);
+ css->checkShardVersionOrThrow(opCtx);
}
AutoGetCollectionForRead::~AutoGetCollectionForRead() {
// Report time spent in read lock
- auto currentOp = CurOp::get(_txn);
- Top::get(_txn->getClient()->getServiceContext())
- .record(_txn,
+ auto currentOp = CurOp::get(_opCtx);
+ Top::get(_opCtx->getClient()->getServiceContext())
+ .record(_opCtx,
currentOp->getNS(),
currentOp->getLogicalOp(),
-1, // "read locked"
@@ -131,7 +131,7 @@ void AutoGetCollectionForRead::_ensureMajorityCommittedSnapshotIsValid(const Nam
if (!minSnapshot) {
return;
}
- auto mySnapshot = _txn->recoveryUnit()->getMajorityCommittedSnapshot();
+ auto mySnapshot = _opCtx->recoveryUnit()->getMajorityCommittedSnapshot();
if (!mySnapshot) {
return;
}
@@ -142,25 +142,25 @@ void AutoGetCollectionForRead::_ensureMajorityCommittedSnapshotIsValid(const Nam
// Yield locks.
_autoColl = boost::none;
- repl::ReplicationCoordinator::get(_txn)->waitUntilSnapshotCommitted(_txn, *minSnapshot);
+ repl::ReplicationCoordinator::get(_opCtx)->waitUntilSnapshotCommitted(_opCtx, *minSnapshot);
- uassertStatusOK(_txn->recoveryUnit()->setReadFromMajorityCommittedSnapshot());
+ uassertStatusOK(_opCtx->recoveryUnit()->setReadFromMajorityCommittedSnapshot());
{
- stdx::lock_guard<Client> lk(*_txn->getClient());
- CurOp::get(_txn)->yielded();
+ stdx::lock_guard<Client> lk(*_opCtx->getClient());
+ CurOp::get(_opCtx)->yielded();
}
// Relock.
- _autoColl.emplace(_txn, nss, MODE_IS);
+ _autoColl.emplace(_opCtx, nss, MODE_IS);
}
}
-AutoGetCollectionOrViewForRead::AutoGetCollectionOrViewForRead(OperationContext* txn,
+AutoGetCollectionOrViewForRead::AutoGetCollectionOrViewForRead(OperationContext* opCtx,
const NamespaceString& nss)
- : AutoGetCollectionForRead(txn, nss, AutoGetCollection::ViewMode::kViewsPermitted),
+ : AutoGetCollectionForRead(opCtx, nss, AutoGetCollection::ViewMode::kViewsPermitted),
_view(_autoColl->getDb() && !getCollection()
- ? _autoColl->getDb()->getViewCatalog()->lookup(txn, nss.ns())
+ ? _autoColl->getDb()->getViewCatalog()->lookup(opCtx, nss.ns())
: nullptr) {}
void AutoGetCollectionOrViewForRead::releaseLocksForView() noexcept {
@@ -169,32 +169,32 @@ void AutoGetCollectionOrViewForRead::releaseLocksForView() noexcept {
_autoColl = boost::none;
}
-OldClientContext::OldClientContext(OperationContext* txn,
+OldClientContext::OldClientContext(OperationContext* opCtx,
const std::string& ns,
Database* db,
bool justCreated)
- : _justCreated(justCreated), _doVersion(true), _ns(ns), _db(db), _txn(txn) {
+ : _justCreated(justCreated), _doVersion(true), _ns(ns), _db(db), _opCtx(opCtx) {
_finishInit();
}
-OldClientContext::OldClientContext(OperationContext* txn,
+OldClientContext::OldClientContext(OperationContext* opCtx,
const std::string& ns,
bool doVersion)
: _justCreated(false), // set for real in finishInit
_doVersion(doVersion),
_ns(ns),
_db(NULL),
- _txn(txn) {
+ _opCtx(opCtx) {
_finishInit();
}
void OldClientContext::_finishInit() {
- _db = dbHolder().get(_txn, _ns);
+ _db = dbHolder().get(_opCtx, _ns);
if (_db) {
_justCreated = false;
} else {
- invariant(_txn->lockState()->isDbLockedForMode(nsToDatabaseSubstring(_ns), MODE_X));
- _db = dbHolder().openDb(_txn, _ns, &_justCreated);
+ invariant(_opCtx->lockState()->isDbLockedForMode(nsToDatabaseSubstring(_ns), MODE_X));
+ _db = dbHolder().openDb(_opCtx, _ns, &_justCreated);
invariant(_db);
}
@@ -202,32 +202,32 @@ void OldClientContext::_finishInit() {
_checkNotStale();
}
- stdx::lock_guard<Client> lk(*_txn->getClient());
- CurOp::get(_txn)->enter_inlock(_ns.c_str(), _db->getProfilingLevel());
+ stdx::lock_guard<Client> lk(*_opCtx->getClient());
+ CurOp::get(_opCtx)->enter_inlock(_ns.c_str(), _db->getProfilingLevel());
}
void OldClientContext::_checkNotStale() const {
- switch (CurOp::get(_txn)->getNetworkOp()) {
+ switch (CurOp::get(_opCtx)->getNetworkOp()) {
case dbGetMore: // getMore is special and should be handled elsewhere.
case dbUpdate: // update & delete check shard version in instance.cpp, so don't check
case dbDelete: // here as well.
break;
default:
- auto css = CollectionShardingState::get(_txn, _ns);
- css->checkShardVersionOrThrow(_txn);
+ auto css = CollectionShardingState::get(_opCtx, _ns);
+ css->checkShardVersionOrThrow(_opCtx);
}
}
OldClientContext::~OldClientContext() {
// Lock must still be held
- invariant(_txn->lockState()->isLocked());
+ invariant(_opCtx->lockState()->isLocked());
- auto currentOp = CurOp::get(_txn);
- Top::get(_txn->getClient()->getServiceContext())
- .record(_txn,
+ auto currentOp = CurOp::get(_opCtx);
+ Top::get(_opCtx->getClient()->getServiceContext())
+ .record(_opCtx,
currentOp->getNS(),
currentOp->getLogicalOp(),
- _txn->lockState()->isWriteLocked() ? 1 : -1,
+ _opCtx->lockState()->isWriteLocked() ? 1 : -1,
_timer.micros(),
currentOp->isCommand(),
currentOp->getReadWriteType());
@@ -235,7 +235,7 @@ OldClientContext::~OldClientContext() {
OldClientWriteContext::OldClientWriteContext(OperationContext* opCtx, const std::string& ns)
- : _txn(opCtx),
+ : _opCtx(opCtx),
_nss(ns),
_autodb(opCtx, _nss.db(), MODE_IX),
_collk(opCtx->lockState(), ns, MODE_IX),
@@ -244,7 +244,7 @@ OldClientWriteContext::OldClientWriteContext(OperationContext* opCtx, const std:
if (!_collection && !_autodb.justCreated()) {
// relock database in MODE_X to allow collection creation
_collk.relockAsDatabaseExclusive(_autodb.lock());
- Database* db = dbHolder().get(_txn, ns);
+ Database* db = dbHolder().get(_opCtx, ns);
invariant(db == _c.db());
}
}
diff --git a/src/mongo/db/db_raii.h b/src/mongo/db/db_raii.h
index dc2cf8c0aaa..04d1658b24d 100644
--- a/src/mongo/db/db_raii.h
+++ b/src/mongo/db/db_raii.h
@@ -53,7 +53,7 @@ class AutoGetDb {
MONGO_DISALLOW_COPYING(AutoGetDb);
public:
- AutoGetDb(OperationContext* txn, StringData ns, LockMode mode);
+ AutoGetDb(OperationContext* opCtx, StringData ns, LockMode mode);
Database* getDb() const {
return _db;
@@ -77,14 +77,14 @@ class AutoGetCollection {
enum class ViewMode;
public:
- AutoGetCollection(OperationContext* txn, const NamespaceString& nss, LockMode modeAll)
- : AutoGetCollection(txn, nss, modeAll, modeAll, ViewMode::kViewsForbidden) {}
+ AutoGetCollection(OperationContext* opCtx, const NamespaceString& nss, LockMode modeAll)
+ : AutoGetCollection(opCtx, nss, modeAll, modeAll, ViewMode::kViewsForbidden) {}
- AutoGetCollection(OperationContext* txn,
+ AutoGetCollection(OperationContext* opCtx,
const NamespaceString& nss,
LockMode modeDB,
LockMode modeColl)
- : AutoGetCollection(txn, nss, modeDB, modeColl, ViewMode::kViewsForbidden) {}
+ : AutoGetCollection(opCtx, nss, modeDB, modeColl, ViewMode::kViewsForbidden) {}
/**
* This constructor is inteded for internal use and should not be used outside this file.
@@ -92,7 +92,7 @@ public:
* or not it is permissible to obtain a handle on a view namespace. Use another constructor or
* another AutoGet class instead.
*/
- AutoGetCollection(OperationContext* txn,
+ AutoGetCollection(OperationContext* opCtx,
const NamespaceString& nss,
LockMode modeDB,
LockMode modeColl,
@@ -132,7 +132,7 @@ class AutoGetOrCreateDb {
MONGO_DISALLOW_COPYING(AutoGetOrCreateDb);
public:
- AutoGetOrCreateDb(OperationContext* txn, StringData ns, LockMode mode);
+ AutoGetOrCreateDb(OperationContext* opCtx, StringData ns, LockMode mode);
Database* getDb() const {
return _db;
@@ -166,8 +166,8 @@ class AutoGetCollectionForRead {
MONGO_DISALLOW_COPYING(AutoGetCollectionForRead);
public:
- AutoGetCollectionForRead(OperationContext* txn, const NamespaceString& nss)
- : AutoGetCollectionForRead(txn, nss, AutoGetCollection::ViewMode::kViewsForbidden) {}
+ AutoGetCollectionForRead(OperationContext* opCtx, const NamespaceString& nss)
+ : AutoGetCollectionForRead(opCtx, nss, AutoGetCollection::ViewMode::kViewsForbidden) {}
~AutoGetCollectionForRead();
@@ -183,11 +183,11 @@ private:
void _ensureMajorityCommittedSnapshotIsValid(const NamespaceString& nss);
const Timer _timer;
- OperationContext* const _txn;
+ OperationContext* const _opCtx;
const ScopedTransaction _transaction;
protected:
- AutoGetCollectionForRead(OperationContext* txn,
+ AutoGetCollectionForRead(OperationContext* opCtx,
const NamespaceString& nss,
AutoGetCollection::ViewMode viewMode);
@@ -207,7 +207,7 @@ class AutoGetCollectionOrViewForRead final : public AutoGetCollectionForRead {
MONGO_DISALLOW_COPYING(AutoGetCollectionOrViewForRead);
public:
- AutoGetCollectionOrViewForRead(OperationContext* txn, const NamespaceString& nss);
+ AutoGetCollectionOrViewForRead(OperationContext* opCtx, const NamespaceString& nss);
ViewDefinition* getView() const {
return _view.get();
@@ -235,13 +235,16 @@ class OldClientContext {
public:
/** this is probably what you want */
- OldClientContext(OperationContext* txn, const std::string& ns, bool doVersion = true);
+ OldClientContext(OperationContext* opCtx, const std::string& ns, bool doVersion = true);
/**
* Below still calls _finishInit, but assumes database has already been acquired
* or just created.
*/
- OldClientContext(OperationContext* txn, const std::string& ns, Database* db, bool justCreated);
+ OldClientContext(OperationContext* opCtx,
+ const std::string& ns,
+ Database* db,
+ bool justCreated);
~OldClientContext();
@@ -263,7 +266,7 @@ private:
bool _doVersion;
const std::string _ns;
Database* _db;
- OperationContext* _txn;
+ OperationContext* _opCtx;
Timer _timer;
};
@@ -284,7 +287,7 @@ public:
}
private:
- OperationContext* const _txn;
+ OperationContext* const _opCtx;
const NamespaceString _nss;
AutoGetOrCreateDb _autodb;
diff --git a/src/mongo/db/dbdirectclient.cpp b/src/mongo/db/dbdirectclient.cpp
index 40fcdb79dc2..90d511716c8 100644
--- a/src/mongo/db/dbdirectclient.cpp
+++ b/src/mongo/db/dbdirectclient.cpp
@@ -53,24 +53,24 @@ class DirectClientScope {
MONGO_DISALLOW_COPYING(DirectClientScope);
public:
- explicit DirectClientScope(OperationContext* txn)
- : _txn(txn), _prev(_txn->getClient()->isInDirectClient()) {
- _txn->getClient()->setInDirectClient(true);
+ explicit DirectClientScope(OperationContext* opCtx)
+ : _opCtx(opCtx), _prev(_opCtx->getClient()->isInDirectClient()) {
+ _opCtx->getClient()->setInDirectClient(true);
}
~DirectClientScope() {
- _txn->getClient()->setInDirectClient(_prev);
+ _opCtx->getClient()->setInDirectClient(_prev);
}
private:
- OperationContext* const _txn;
+ OperationContext* const _opCtx;
const bool _prev;
};
} // namespace
-DBDirectClient::DBDirectClient(OperationContext* txn) : _txn(txn) {}
+DBDirectClient::DBDirectClient(OperationContext* opCtx) : _opCtx(opCtx) {}
bool DBDirectClient::isFailed() const {
return false;
@@ -110,8 +110,8 @@ bool DBDirectClient::lazySupported() const {
return true;
}
-void DBDirectClient::setOpCtx(OperationContext* txn) {
- _txn = txn;
+void DBDirectClient::setOpCtx(OperationContext* opCtx) {
+ _opCtx = opCtx;
}
QueryOptions DBDirectClient::_lookupAvailableOptions() {
@@ -120,12 +120,12 @@ QueryOptions DBDirectClient::_lookupAvailableOptions() {
}
bool DBDirectClient::call(Message& toSend, Message& response, bool assertOk, string* actualServer) {
- DirectClientScope directClientScope(_txn);
- LastError::get(_txn->getClient()).startRequest();
+ DirectClientScope directClientScope(_opCtx);
+ LastError::get(_opCtx->getClient()).startRequest();
DbResponse dbResponse;
- CurOp curOp(_txn);
- assembleResponse(_txn, toSend, dbResponse, kHostAndPortForDirectClient);
+ CurOp curOp(_opCtx);
+ assembleResponse(_opCtx, toSend, dbResponse, kHostAndPortForDirectClient);
verify(!dbResponse.response.empty());
response = std::move(dbResponse.response);
@@ -133,12 +133,12 @@ bool DBDirectClient::call(Message& toSend, Message& response, bool assertOk, str
}
void DBDirectClient::say(Message& toSend, bool isRetry, string* actualServer) {
- DirectClientScope directClientScope(_txn);
- LastError::get(_txn->getClient()).startRequest();
+ DirectClientScope directClientScope(_opCtx);
+ LastError::get(_opCtx->getClient()).startRequest();
DbResponse dbResponse;
- CurOp curOp(_txn);
- assembleResponse(_txn, toSend, dbResponse, kHostAndPortForDirectClient);
+ CurOp curOp(_opCtx);
+ assembleResponse(_opCtx, toSend, dbResponse, kHostAndPortForDirectClient);
}
unique_ptr<DBClientCursor> DBDirectClient::query(const string& ns,
@@ -164,7 +164,7 @@ unsigned long long DBDirectClient::count(
std::string errmsg;
BSONObjBuilder result;
- bool runRetval = countCmd->run(_txn, dbname, cmdObj, options, errmsg, result);
+ bool runRetval = countCmd->run(_opCtx, dbname, cmdObj, options, errmsg, result);
if (!runRetval) {
Command::appendCommandStatus(result, runRetval, errmsg);
Status commandStatus = getStatusFromCommandResult(result.obj());
diff --git a/src/mongo/db/dbdirectclient.h b/src/mongo/db/dbdirectclient.h
index 1bd4c407f8a..c35a2855e6f 100644
--- a/src/mongo/db/dbdirectclient.h
+++ b/src/mongo/db/dbdirectclient.h
@@ -49,12 +49,12 @@ class OperationContext;
*/
class DBDirectClient : public DBClientBase {
public:
- DBDirectClient(OperationContext* txn);
+ DBDirectClient(OperationContext* opCtx);
using DBClientBase::query;
// XXX: is this valid or useful?
- void setOpCtx(OperationContext* txn);
+ void setOpCtx(OperationContext* opCtx);
virtual std::unique_ptr<DBClientCursor> query(const std::string& ns,
Query query,
@@ -97,7 +97,7 @@ public:
int getMaxWireVersion() final;
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
};
} // namespace mongo
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 2dd6c2c7d1c..83f4c15e52d 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -83,7 +83,7 @@ using std::stringstream;
using logger::LogComponent;
-void Helpers::ensureIndex(OperationContext* txn,
+void Helpers::ensureIndex(OperationContext* opCtx,
Collection* collection,
BSONObj keyPattern,
IndexDescriptor::IndexVersion indexVersion,
@@ -97,7 +97,7 @@ void Helpers::ensureIndex(OperationContext* txn,
b.appendBool("unique", unique);
BSONObj o = b.done();
- MultiIndexBlock indexer(txn, collection);
+ MultiIndexBlock indexer(opCtx, collection);
Status status = indexer.init(o).getStatus();
if (status.code() == ErrorCodes::IndexAlreadyExists)
@@ -106,7 +106,7 @@ void Helpers::ensureIndex(OperationContext* txn,
uassertStatusOK(indexer.insertAllDocumentsInCollection());
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
indexer.commit();
wunit.commit();
}
@@ -114,40 +114,40 @@ void Helpers::ensureIndex(OperationContext* txn,
/* fetch a single object from collection ns that matches query
set your db SavedContext first
*/
-bool Helpers::findOne(OperationContext* txn,
+bool Helpers::findOne(OperationContext* opCtx,
Collection* collection,
const BSONObj& query,
BSONObj& result,
bool requireIndex) {
- RecordId loc = findOne(txn, collection, query, requireIndex);
+ RecordId loc = findOne(opCtx, collection, query, requireIndex);
if (loc.isNull())
return false;
- result = collection->docFor(txn, loc).value();
+ result = collection->docFor(opCtx, loc).value();
return true;
}
/* fetch a single object from collection ns that matches query
set your db SavedContext first
*/
-RecordId Helpers::findOne(OperationContext* txn,
+RecordId Helpers::findOne(OperationContext* opCtx,
Collection* collection,
const BSONObj& query,
bool requireIndex) {
if (!collection)
return RecordId();
- const ExtensionsCallbackReal extensionsCallback(txn, &collection->ns());
+ const ExtensionsCallbackReal extensionsCallback(opCtx, &collection->ns());
auto qr = stdx::make_unique<QueryRequest>(collection->ns());
qr->setFilter(query);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx, std::move(qr), extensionsCallback);
massert(17244, "Could not canonicalize " + query.toString(), statusWithCQ.isOK());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
size_t options = requireIndex ? QueryPlannerParams::NO_TABLE_SCAN : QueryPlannerParams::DEFAULT;
auto statusWithPlanExecutor =
- getExecutor(txn, collection, std::move(cq), PlanExecutor::YIELD_MANUAL, options);
+ getExecutor(opCtx, collection, std::move(cq), PlanExecutor::YIELD_MANUAL, options);
massert(17245,
"Could not get executor for query " + query.toString(),
statusWithPlanExecutor.isOK());
@@ -165,7 +165,7 @@ RecordId Helpers::findOne(OperationContext* txn,
return RecordId();
}
-bool Helpers::findById(OperationContext* txn,
+bool Helpers::findById(OperationContext* opCtx,
Database* database,
const char* ns,
BSONObj query,
@@ -183,7 +183,7 @@ bool Helpers::findById(OperationContext* txn,
*nsFound = true;
IndexCatalog* catalog = collection->getIndexCatalog();
- const IndexDescriptor* desc = catalog->findIdIndex(txn);
+ const IndexDescriptor* desc = catalog->findIdIndex(opCtx);
if (!desc)
return false;
@@ -191,28 +191,30 @@ bool Helpers::findById(OperationContext* txn,
if (indexFound)
*indexFound = 1;
- RecordId loc = catalog->getIndex(desc)->findSingle(txn, query["_id"].wrap());
+ RecordId loc = catalog->getIndex(desc)->findSingle(opCtx, query["_id"].wrap());
if (loc.isNull())
return false;
- result = collection->docFor(txn, loc).value();
+ result = collection->docFor(opCtx, loc).value();
return true;
}
-RecordId Helpers::findById(OperationContext* txn, Collection* collection, const BSONObj& idquery) {
+RecordId Helpers::findById(OperationContext* opCtx,
+ Collection* collection,
+ const BSONObj& idquery) {
verify(collection);
IndexCatalog* catalog = collection->getIndexCatalog();
- const IndexDescriptor* desc = catalog->findIdIndex(txn);
+ const IndexDescriptor* desc = catalog->findIdIndex(opCtx);
uassert(13430, "no _id index", desc);
- return catalog->getIndex(desc)->findSingle(txn, idquery["_id"].wrap());
+ return catalog->getIndex(desc)->findSingle(opCtx, idquery["_id"].wrap());
}
-bool Helpers::getSingleton(OperationContext* txn, const char* ns, BSONObj& result) {
- AutoGetCollectionForRead ctx(txn, NamespaceString(ns));
- unique_ptr<PlanExecutor> exec(
- InternalPlanner::collectionScan(txn, ns, ctx.getCollection(), PlanExecutor::YIELD_MANUAL));
+bool Helpers::getSingleton(OperationContext* opCtx, const char* ns, BSONObj& result) {
+ AutoGetCollectionForRead ctx(opCtx, NamespaceString(ns));
+ unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(
+ opCtx, ns, ctx.getCollection(), PlanExecutor::YIELD_MANUAL));
PlanExecutor::ExecState state = exec->getNext(&result, NULL);
- CurOp::get(txn)->done();
+ CurOp::get(opCtx)->done();
// Non-yielding collection scans from InternalPlanner will never error.
invariant(PlanExecutor::ADVANCED == state || PlanExecutor::IS_EOF == state);
@@ -225,10 +227,13 @@ bool Helpers::getSingleton(OperationContext* txn, const char* ns, BSONObj& resul
return false;
}
-bool Helpers::getLast(OperationContext* txn, const char* ns, BSONObj& result) {
- AutoGetCollectionForRead autoColl(txn, NamespaceString(ns));
- unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(
- txn, ns, autoColl.getCollection(), PlanExecutor::YIELD_MANUAL, InternalPlanner::BACKWARD));
+bool Helpers::getLast(OperationContext* opCtx, const char* ns, BSONObj& result) {
+ AutoGetCollectionForRead autoColl(opCtx, NamespaceString(ns));
+ unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(opCtx,
+ ns,
+ autoColl.getCollection(),
+ PlanExecutor::YIELD_MANUAL,
+ InternalPlanner::BACKWARD));
PlanExecutor::ExecState state = exec->getNext(&result, NULL);
// Non-yielding collection scans from InternalPlanner will never error.
@@ -242,12 +247,15 @@ bool Helpers::getLast(OperationContext* txn, const char* ns, BSONObj& result) {
return false;
}
-void Helpers::upsert(OperationContext* txn, const string& ns, const BSONObj& o, bool fromMigrate) {
+void Helpers::upsert(OperationContext* opCtx,
+ const string& ns,
+ const BSONObj& o,
+ bool fromMigrate) {
BSONElement e = o["_id"];
verify(e.type());
BSONObj id = e.wrap();
- OldClientContext context(txn, ns);
+ OldClientContext context(opCtx, ns);
const NamespaceString requestNs(ns);
UpdateRequest request(requestNs);
@@ -259,11 +267,11 @@ void Helpers::upsert(OperationContext* txn, const string& ns, const BSONObj& o,
UpdateLifecycleImpl updateLifecycle(requestNs);
request.setLifecycle(&updateLifecycle);
- update(txn, context.db(), request);
+ update(opCtx, context.db(), request);
}
-void Helpers::putSingleton(OperationContext* txn, const char* ns, BSONObj obj) {
- OldClientContext context(txn, ns);
+void Helpers::putSingleton(OperationContext* opCtx, const char* ns, BSONObj obj) {
+ OldClientContext context(opCtx, ns);
const NamespaceString requestNs(ns);
UpdateRequest request(requestNs);
@@ -273,9 +281,9 @@ void Helpers::putSingleton(OperationContext* txn, const char* ns, BSONObj obj) {
UpdateLifecycleImpl updateLifecycle(requestNs);
request.setLifecycle(&updateLifecycle);
- update(txn, context.db(), request);
+ update(opCtx, context.db(), request);
- CurOp::get(txn)->done();
+ CurOp::get(opCtx)->done();
}
BSONObj Helpers::toKeyFormat(const BSONObj& o) {
@@ -294,7 +302,7 @@ BSONObj Helpers::inferKeyPattern(const BSONObj& o) {
return kpBuilder.obj();
}
-long long Helpers::removeRange(OperationContext* txn,
+long long Helpers::removeRange(OperationContext* opCtx,
const KeyRange& range,
BoundInclusion boundInclusion,
const WriteConcernOptions& writeConcern,
@@ -311,7 +319,7 @@ long long Helpers::removeRange(OperationContext* txn,
BSONObj max;
{
- AutoGetCollectionForRead ctx(txn, nss);
+ AutoGetCollectionForRead ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
if (!collection) {
warning(LogComponent::kSharding)
@@ -324,7 +332,7 @@ long long Helpers::removeRange(OperationContext* txn,
// Therefore, any multi-key index prefixed by shard key cannot be multikey over
// the shard key fields.
const IndexDescriptor* idx =
- collection->getIndexCatalog()->findShardKeyPrefixedIndex(txn,
+ collection->getIndexCatalog()->findShardKeyPrefixedIndex(opCtx,
range.keyPattern,
false); // requireSingleKey
if (!idx) {
@@ -359,12 +367,13 @@ long long Helpers::removeRange(OperationContext* txn,
while (1) {
// Scoping for write lock.
{
- AutoGetCollection ctx(txn, nss, MODE_IX, MODE_IX);
+ AutoGetCollection ctx(opCtx, nss, MODE_IX, MODE_IX);
Collection* collection = ctx.getCollection();
if (!collection)
break;
- IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByName(txn, indexName);
+ IndexDescriptor* desc =
+ collection->getIndexCatalog()->findIndexByName(opCtx, indexName);
if (!desc) {
warning(LogComponent::kSharding) << "shard key index '" << indexName << "' on '"
@@ -373,7 +382,7 @@ long long Helpers::removeRange(OperationContext* txn,
}
unique_ptr<PlanExecutor> exec(
- InternalPlanner::indexScan(txn,
+ InternalPlanner::indexScan(opCtx,
collection,
desc,
min,
@@ -404,7 +413,7 @@ long long Helpers::removeRange(OperationContext* txn,
verify(PlanExecutor::ADVANCED == state);
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
if (onlyRemoveOrphanedDocs) {
// Do a final check in the write lock to make absolutely sure that our
@@ -413,12 +422,12 @@ long long Helpers::removeRange(OperationContext* txn,
// We should never be able to turn off the sharding state once enabled, but
// in the future we might want to.
- verify(ShardingState::get(txn)->enabled());
+ verify(ShardingState::get(opCtx)->enabled());
bool docIsOrphan;
// In write lock, so will be the most up-to-date version
- auto metadataNow = CollectionShardingState::get(txn, nss.ns())->getMetadata();
+ auto metadataNow = CollectionShardingState::get(opCtx, nss.ns())->getMetadata();
if (metadataNow) {
ShardKeyPattern kp(metadataNow->getKeyPattern());
BSONObj key = kp.extractShardKeyFromDoc(obj);
@@ -437,7 +446,7 @@ long long Helpers::removeRange(OperationContext* txn,
}
}
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, nss)) {
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, nss)) {
warning() << "stepped down from primary while deleting chunk; "
<< "orphaning data in " << nss.ns() << " in range [" << redact(min)
<< ", " << redact(max) << ")";
@@ -448,7 +457,7 @@ long long Helpers::removeRange(OperationContext* txn,
callback->goingToDelete(obj);
OpDebug* const nullOpDebug = nullptr;
- collection->deleteDocument(txn, rloc, nullOpDebug, fromMigrate);
+ collection->deleteDocument(opCtx, rloc, nullOpDebug, fromMigrate);
wuow.commit();
numDeleted++;
}
@@ -459,8 +468,8 @@ long long Helpers::removeRange(OperationContext* txn,
if (writeConcern.shouldWaitForOtherNodes() && numDeleted > 0) {
repl::ReplicationCoordinator::StatusAndDuration replStatus =
repl::getGlobalReplicationCoordinator()->awaitReplication(
- txn,
- repl::ReplClientInfo::forClient(txn->getClient()).getLastOp(),
+ opCtx,
+ repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(),
writeConcern);
if (replStatus.status.code() == ErrorCodes::ExceededTimeLimit) {
warning(LogComponent::kSharding) << "replication to secondaries for removeRange at "
@@ -484,13 +493,13 @@ long long Helpers::removeRange(OperationContext* txn,
return numDeleted;
}
-void Helpers::emptyCollection(OperationContext* txn, const char* ns) {
- OldClientContext context(txn, ns);
- bool shouldReplicateWrites = txn->writesAreReplicated();
- txn->setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, txn, shouldReplicateWrites);
+void Helpers::emptyCollection(OperationContext* opCtx, const char* ns) {
+ OldClientContext context(opCtx, ns);
+ bool shouldReplicateWrites = opCtx->writesAreReplicated();
+ opCtx->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, opCtx, shouldReplicateWrites);
Collection* collection = context.db() ? context.db()->getCollection(ns) : nullptr;
- deleteObjects(txn, collection, ns, BSONObj(), PlanExecutor::YIELD_MANUAL, false);
+ deleteObjects(opCtx, collection, ns, BSONObj(), PlanExecutor::YIELD_MANUAL, false);
}
Helpers::RemoveSaver::RemoveSaver(const string& a, const string& b, const string& why) {
diff --git a/src/mongo/db/dbhelpers.h b/src/mongo/db/dbhelpers.h
index bb303a681c4..3ff0318b687 100644
--- a/src/mongo/db/dbhelpers.h
+++ b/src/mongo/db/dbhelpers.h
@@ -64,7 +64,7 @@ struct Helpers {
Note: does nothing if collection does not yet exist.
*/
- static void ensureIndex(OperationContext* txn,
+ static void ensureIndex(OperationContext* opCtx,
Collection* collection,
BSONObj keyPattern,
IndexDescriptor::IndexVersion indexVersion,
@@ -82,13 +82,13 @@ struct Helpers {
@return true if object found
*/
- static bool findOne(OperationContext* txn,
+ static bool findOne(OperationContext* opCtx,
Collection* collection,
const BSONObj& query,
BSONObj& result,
bool requireIndex = false);
- static RecordId findOne(OperationContext* txn,
+ static RecordId findOne(OperationContext* opCtx,
Collection* collection,
const BSONObj& query,
bool requireIndex);
@@ -97,7 +97,7 @@ struct Helpers {
* @param foundIndex if passed in will be set to 1 if ns and index found
* @return true if object found
*/
- static bool findById(OperationContext* txn,
+ static bool findById(OperationContext* opCtx,
Database* db,
const char* ns,
BSONObj query,
@@ -108,7 +108,7 @@ struct Helpers {
/* TODO: should this move into Collection?
* uasserts if no _id index.
* @return null loc if not found */
- static RecordId findById(OperationContext* txn, Collection* collection, const BSONObj& query);
+ static RecordId findById(OperationContext* opCtx, Collection* collection, const BSONObj& query);
/**
* Get the first object generated from a forward natural-order scan on "ns". Callers do not
@@ -119,25 +119,25 @@ struct Helpers {
*
* Returns false if there is no such object.
*/
- static bool getSingleton(OperationContext* txn, const char* ns, BSONObj& result);
+ static bool getSingleton(OperationContext* opCtx, const char* ns, BSONObj& result);
/**
* Same as getSingleton, but with a reverse natural-order scan on "ns".
*/
- static bool getLast(OperationContext* txn, const char* ns, BSONObj& result);
+ static bool getLast(OperationContext* opCtx, const char* ns, BSONObj& result);
/**
* Performs an upsert of "obj" into the collection "ns", with an empty update predicate.
* Callers must have "ns" locked.
*/
- static void putSingleton(OperationContext* txn, const char* ns, BSONObj obj);
+ static void putSingleton(OperationContext* opCtx, const char* ns, BSONObj obj);
/**
* you have to lock
* you do not have to have Context set
* o has to have an _id field or will assert
*/
- static void upsert(OperationContext* txn,
+ static void upsert(OperationContext* opCtx,
const std::string& ns,
const BSONObj& o,
bool fromMigrate = false);
@@ -171,7 +171,7 @@ struct Helpers {
* Does oplog the individual document deletions.
* // TODO: Refactor this mechanism, it is growing too large
*/
- static long long removeRange(OperationContext* txn,
+ static long long removeRange(OperationContext* opCtx,
const KeyRange& range,
BoundInclusion boundInclusion,
const WriteConcernOptions& secondaryThrottle,
@@ -184,7 +184,7 @@ struct Helpers {
* You do not need to set the database before calling.
* Does not oplog the operation.
*/
- static void emptyCollection(OperationContext* txn, const char* ns);
+ static void emptyCollection(OperationContext* opCtx, const char* ns);
/**
* for saving deleted bson objects to a flat file
diff --git a/src/mongo/db/dbwebserver.cpp b/src/mongo/db/dbwebserver.cpp
index 41b097c5718..34b06d1f784 100644
--- a/src/mongo/db/dbwebserver.cpp
+++ b/src/mongo/db/dbwebserver.cpp
@@ -162,7 +162,7 @@ public:
virtual void init() {}
- virtual void run(OperationContext* txn, stringstream& ss) {
+ virtual void run(OperationContext* opCtx, stringstream& ss) {
_log->toHTML(ss);
}
RamLog* _log;
@@ -172,7 +172,7 @@ class FavIconHandler : public DbWebHandler {
public:
FavIconHandler() : DbWebHandler("favicon.ico", 0, false) {}
- virtual void handle(OperationContext* txn,
+ virtual void handle(OperationContext* opCtx,
const char* rq,
const std::string& url,
BSONObj params,
@@ -191,7 +191,7 @@ class StatusHandler : public DbWebHandler {
public:
StatusHandler() : DbWebHandler("_status", 1, false) {}
- virtual void handle(OperationContext* txn,
+ virtual void handle(OperationContext* opCtx,
const char* rq,
const std::string& url,
BSONObj params,
@@ -231,7 +231,7 @@ public:
string errmsg;
BSONObjBuilder sub;
- if (!c->run(txn, "admin.$cmd", co, 0, errmsg, sub))
+ if (!c->run(opCtx, "admin.$cmd", co, 0, errmsg, sub))
buf.append(cmd, errmsg);
else
buf.append(cmd, sub.obj());
@@ -246,7 +246,7 @@ class CommandListHandler : public DbWebHandler {
public:
CommandListHandler() : DbWebHandler("_commands", 1, true) {}
- virtual void handle(OperationContext* txn,
+ virtual void handle(OperationContext* opCtx,
const char* rq,
const std::string& url,
BSONObj params,
@@ -305,7 +305,7 @@ public:
return _cmd(cmd) != 0;
}
- virtual void handle(OperationContext* txn,
+ virtual void handle(OperationContext* opCtx,
const char* rq,
const std::string& url,
BSONObj params,
@@ -330,7 +330,7 @@ public:
rpc::CommandRequest cmdRequest{&cmdRequestMsg};
rpc::CommandReplyBuilder cmdReplyBuilder{};
- Command::execCommand(txn, c, cmdRequest, &cmdReplyBuilder);
+ Command::execCommand(opCtx, c, cmdRequest, &cmdReplyBuilder);
auto cmdReplyMsg = cmdReplyBuilder.done();
rpc::CommandReply cmdReply{&cmdReplyMsg};
@@ -373,10 +373,10 @@ void DbWebServer::doRequest(const char* rq,
vector<string>& headers,
const SockAddr& from) {
Client* client = &cc();
- auto txn = client->makeOperationContext();
+ auto opCtx = client->makeOperationContext();
if (url.size() > 1) {
- if (!_allowed(txn.get(), rq, headers, from)) {
+ if (!_allowed(opCtx.get(), rq, headers, from)) {
responseCode = 401;
headers.push_back("Content-Type: text/plain;charset=utf-8");
responseMsg = "not allowed\n";
@@ -403,7 +403,7 @@ void DbWebServer::doRequest(const char* rq,
callback.empty() || serverGlobalParams.jsonp);
handler->handle(
- txn.get(), rq, url, params, responseMsg, responseCode, headers, from);
+ opCtx.get(), rq, url, params, responseMsg, responseCode, headers, from);
if (responseCode == 200 && !callback.empty()) {
responseMsg = callback + '(' + responseMsg + ')';
@@ -427,7 +427,7 @@ void DbWebServer::doRequest(const char* rq,
// generate home page
- if (!_allowed(txn.get(), rq, headers, from)) {
+ if (!_allowed(opCtx.get(), rq, headers, from)) {
responseCode = 401;
headers.push_back("Content-Type: text/plain;charset=utf-8");
responseMsg = "not allowed\n";
@@ -476,23 +476,23 @@ void DbWebServer::doRequest(const char* rq,
doUnlockedStuff(ss);
- WebStatusPlugin::runAll(txn.get(), ss);
+ WebStatusPlugin::runAll(opCtx.get(), ss);
ss << "</body></html>\n";
responseMsg = ss.str();
headers.push_back("Content-Type: text/html;charset=utf-8");
}
-bool DbWebServer::_allowed(OperationContext* txn,
+bool DbWebServer::_allowed(OperationContext* opCtx,
const char* rq,
vector<string>& headers,
const SockAddr& from) {
- AuthorizationSession* authSess = AuthorizationSession::get(txn->getClient());
+ AuthorizationSession* authSess = AuthorizationSession::get(opCtx->getClient());
if (!authSess->getAuthorizationManager().isAuthEnabled()) {
return true;
}
- if (from.isLocalHost() && !_webUsers->haveAdminUsers(txn)) {
+ if (from.isLocalHost() && !_webUsers->haveAdminUsers(opCtx)) {
authSess->grantInternalAuthorization();
return true;
}
@@ -515,7 +515,7 @@ bool DbWebServer::_allowed(OperationContext* txn,
UserName userName(parms["username"], "admin");
User* user;
AuthorizationManager& authzManager = authSess->getAuthorizationManager();
- Status status = authzManager.acquireUser(txn, userName, &user);
+ Status status = authzManager.acquireUser(opCtx, userName, &user);
if (!status.isOK()) {
if (status.code() != ErrorCodes::UserNotFound) {
uasserted(17051, status.reason());
@@ -548,7 +548,7 @@ bool DbWebServer::_allowed(OperationContext* txn,
const string r1 = md5simpledigest(r.str());
if (r1 == parms["response"]) {
- Status status = authSess->addAndAuthorizeUser(txn, userName);
+ Status status = authSess->addAndAuthorizeUser(opCtx, userName);
uassertStatusOK(status);
return true;
}
@@ -593,7 +593,7 @@ void WebStatusPlugin::initAll() {
(*_plugins)[i]->init();
}
-void WebStatusPlugin::runAll(OperationContext* txn, stringstream& ss) {
+void WebStatusPlugin::runAll(OperationContext* opCtx, stringstream& ss) {
if (!_plugins)
return;
@@ -606,7 +606,7 @@ void WebStatusPlugin::runAll(OperationContext* txn, stringstream& ss) {
ss << "<br>\n";
- p->run(txn, ss);
+ p->run(opCtx, ss);
}
}
diff --git a/src/mongo/db/dbwebserver.h b/src/mongo/db/dbwebserver.h
index df00f2f0e6d..05de60f21e1 100644
--- a/src/mongo/db/dbwebserver.h
+++ b/src/mongo/db/dbwebserver.h
@@ -71,7 +71,7 @@ public:
return _requiresREST;
}
- virtual void handle(OperationContext* txn,
+ virtual void handle(OperationContext* opCtx,
const char* rq, // the full request
const std::string& url,
BSONObj params,
@@ -105,12 +105,12 @@ public:
const std::string& subheader = "");
virtual ~WebStatusPlugin() {}
- virtual void run(OperationContext* txn, std::stringstream& ss) = 0;
+ virtual void run(OperationContext* opCtx, std::stringstream& ss) = 0;
/** called when web server stats up */
virtual void init() = 0;
static void initAll();
- static void runAll(OperationContext* txn, std::stringstream& ss);
+ static void runAll(OperationContext* opCtx, std::stringstream& ss);
private:
std::string _name;
@@ -130,7 +130,7 @@ private:
std::vector<std::string>& headers,
const SockAddr& from);
- bool _allowed(OperationContext* txn,
+ bool _allowed(OperationContext* opCtx,
const char* rq,
std::vector<std::string>& headers,
const SockAddr& from);
diff --git a/src/mongo/db/exec/and_hash.cpp b/src/mongo/db/exec/and_hash.cpp
index 033949827bc..7f8f2cb9bc4 100644
--- a/src/mongo/db/exec/and_hash.cpp
+++ b/src/mongo/db/exec/and_hash.cpp
@@ -412,7 +412,9 @@ PlanStage::StageState AndHashStage::hashOtherChildren(WorkingSetID* out) {
}
}
-void AndHashStage::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+void AndHashStage::doInvalidate(OperationContext* opCtx,
+ const RecordId& dl,
+ InvalidationType type) {
// TODO remove this since calling isEOF is illegal inside of doInvalidate().
if (isEOF()) {
return;
@@ -424,7 +426,7 @@ void AndHashStage::doInvalidate(OperationContext* txn, const RecordId& dl, Inval
if (WorkingSet::INVALID_ID != _lookAheadResults[i]) {
WorkingSetMember* member = _ws->get(_lookAheadResults[i]);
if (member->hasRecordId() && member->recordId == dl) {
- WorkingSetCommon::fetchAndInvalidateRecordId(txn, member, _collection);
+ WorkingSetCommon::fetchAndInvalidateRecordId(opCtx, member, _collection);
_ws->flagForReview(_lookAheadResults[i]);
_lookAheadResults[i] = WorkingSet::INVALID_ID;
}
@@ -453,7 +455,7 @@ void AndHashStage::doInvalidate(OperationContext* txn, const RecordId& dl, Inval
_memUsage -= member->getMemUsage();
// The RecordId is about to be invalidated. Fetch it and clear the RecordId.
- WorkingSetCommon::fetchAndInvalidateRecordId(txn, member, _collection);
+ WorkingSetCommon::fetchAndInvalidateRecordId(opCtx, member, _collection);
// Add the WSID to the to-be-reviewed list in the WS.
_ws->flagForReview(id);
diff --git a/src/mongo/db/exec/and_hash.h b/src/mongo/db/exec/and_hash.h
index 456062dc41e..8d28f72e16e 100644
--- a/src/mongo/db/exec/and_hash.h
+++ b/src/mongo/db/exec/and_hash.h
@@ -73,7 +73,7 @@ public:
StageState doWork(WorkingSetID* out) final;
bool isEOF() final;
- void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
+ void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
StageType stageType() const final {
return STAGE_AND_HASH;
diff --git a/src/mongo/db/exec/and_sorted.cpp b/src/mongo/db/exec/and_sorted.cpp
index 47efc2aba20..ce073bbb534 100644
--- a/src/mongo/db/exec/and_sorted.cpp
+++ b/src/mongo/db/exec/and_sorted.cpp
@@ -242,7 +242,7 @@ PlanStage::StageState AndSortedStage::moveTowardTargetRecordId(WorkingSetID* out
}
-void AndSortedStage::doInvalidate(OperationContext* txn,
+void AndSortedStage::doInvalidate(OperationContext* opCtx,
const RecordId& dl,
InvalidationType type) {
// TODO remove this since calling isEOF is illegal inside of doInvalidate().
@@ -259,7 +259,7 @@ void AndSortedStage::doInvalidate(OperationContext* txn,
++_specificStats.flagged;
// The RecordId could still be a valid result so flag it and save it for later.
- WorkingSetCommon::fetchAndInvalidateRecordId(txn, _ws->get(_targetId), _collection);
+ WorkingSetCommon::fetchAndInvalidateRecordId(opCtx, _ws->get(_targetId), _collection);
_ws->flagForReview(_targetId);
_targetId = WorkingSet::INVALID_ID;
diff --git a/src/mongo/db/exec/and_sorted.h b/src/mongo/db/exec/and_sorted.h
index be245b9583f..b7f422a0136 100644
--- a/src/mongo/db/exec/and_sorted.h
+++ b/src/mongo/db/exec/and_sorted.h
@@ -60,7 +60,7 @@ public:
StageState doWork(WorkingSetID* out) final;
bool isEOF() final;
- void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
+ void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
StageType stageType() const final {
return STAGE_AND_SORTED;
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index f1867423b78..9c10ccf7ea6 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -53,14 +53,14 @@ namespace mongo {
// static
const char* CachedPlanStage::kStageType = "CACHED_PLAN";
-CachedPlanStage::CachedPlanStage(OperationContext* txn,
+CachedPlanStage::CachedPlanStage(OperationContext* opCtx,
Collection* collection,
WorkingSet* ws,
CanonicalQuery* cq,
const QueryPlannerParams& params,
size_t decisionWorks,
PlanStage* root)
- : PlanStage(kStageType, txn),
+ : PlanStage(kStageType, opCtx),
_collection(collection),
_ws(ws),
_canonicalQuery(cq),
@@ -299,13 +299,13 @@ PlanStage::StageState CachedPlanStage::doWork(WorkingSetID* out) {
return child()->work(out);
}
-void CachedPlanStage::doInvalidate(OperationContext* txn,
+void CachedPlanStage::doInvalidate(OperationContext* opCtx,
const RecordId& dl,
InvalidationType type) {
for (auto it = _results.begin(); it != _results.end(); ++it) {
WorkingSetMember* member = _ws->get(*it);
if (member->hasRecordId() && member->recordId == dl) {
- WorkingSetCommon::fetchAndInvalidateRecordId(txn, member, _collection);
+ WorkingSetCommon::fetchAndInvalidateRecordId(opCtx, member, _collection);
}
}
}
diff --git a/src/mongo/db/exec/cached_plan.h b/src/mongo/db/exec/cached_plan.h
index 64fb35a9ae6..008fc196491 100644
--- a/src/mongo/db/exec/cached_plan.h
+++ b/src/mongo/db/exec/cached_plan.h
@@ -53,7 +53,7 @@ class PlanYieldPolicy;
*/
class CachedPlanStage final : public PlanStage {
public:
- CachedPlanStage(OperationContext* txn,
+ CachedPlanStage(OperationContext* opCtx,
Collection* collection,
WorkingSet* ws,
CanonicalQuery* cq,
@@ -65,7 +65,7 @@ public:
StageState doWork(WorkingSetID* out) final;
- void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
+ void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
StageType stageType() const final {
return STAGE_CACHED_PLAN;
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index a102eee8bda..987f7c28513 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -54,11 +54,11 @@ using stdx::make_unique;
// static
const char* CollectionScan::kStageType = "COLLSCAN";
-CollectionScan::CollectionScan(OperationContext* txn,
+CollectionScan::CollectionScan(OperationContext* opCtx,
const CollectionScanParams& params,
WorkingSet* workingSet,
const MatchExpression* filter)
- : PlanStage(kStageType, txn),
+ : PlanStage(kStageType, opCtx),
_workingSet(workingSet),
_filter(filter),
_params(params),
@@ -200,7 +200,7 @@ bool CollectionScan::isEOF() {
return _commonStats.isEOF || _isDead;
}
-void CollectionScan::doInvalidate(OperationContext* txn,
+void CollectionScan::doInvalidate(OperationContext* opCtx,
const RecordId& id,
InvalidationType type) {
// We don't care about mutations since we apply any filters to the result when we (possibly)
@@ -213,7 +213,7 @@ void CollectionScan::doInvalidate(OperationContext* txn,
// Deletions can harm the underlying RecordCursor so we must pass them down.
if (_cursor) {
- _cursor->invalidate(txn, id);
+ _cursor->invalidate(opCtx, id);
}
if (_params.tailable && id == _lastSeenId) {
diff --git a/src/mongo/db/exec/collection_scan.h b/src/mongo/db/exec/collection_scan.h
index a1f076f8d27..3ba80b90d3f 100644
--- a/src/mongo/db/exec/collection_scan.h
+++ b/src/mongo/db/exec/collection_scan.h
@@ -49,7 +49,7 @@ class OperationContext;
*/
class CollectionScan final : public PlanStage {
public:
- CollectionScan(OperationContext* txn,
+ CollectionScan(OperationContext* opCtx,
const CollectionScanParams& params,
WorkingSet* workingSet,
const MatchExpression* filter);
@@ -57,7 +57,7 @@ public:
StageState doWork(WorkingSetID* out) final;
bool isEOF() final;
- void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
+ void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
void doSaveState() final;
void doRestoreState() final;
void doDetachFromOperationContext() final;
diff --git a/src/mongo/db/exec/count.cpp b/src/mongo/db/exec/count.cpp
index b1a15f756e6..61e50d7b305 100644
--- a/src/mongo/db/exec/count.cpp
+++ b/src/mongo/db/exec/count.cpp
@@ -44,12 +44,12 @@ using stdx::make_unique;
// static
const char* CountStage::kStageType = "COUNT";
-CountStage::CountStage(OperationContext* txn,
+CountStage::CountStage(OperationContext* opCtx,
Collection* collection,
CountStageParams params,
WorkingSet* ws,
PlanStage* child)
- : PlanStage(kStageType, txn),
+ : PlanStage(kStageType, opCtx),
_collection(collection),
_params(std::move(params)),
_leftToSkip(_params.skip),
diff --git a/src/mongo/db/exec/count.h b/src/mongo/db/exec/count.h
index 56cdbe6614c..a876d7386c5 100644
--- a/src/mongo/db/exec/count.h
+++ b/src/mongo/db/exec/count.h
@@ -71,7 +71,7 @@ struct CountStageParams {
*/
class CountStage final : public PlanStage {
public:
- CountStage(OperationContext* txn,
+ CountStage(OperationContext* opCtx,
Collection* collection,
CountStageParams params,
WorkingSet* ws,
diff --git a/src/mongo/db/exec/count_scan.cpp b/src/mongo/db/exec/count_scan.cpp
index 11b63541f75..0693b310b02 100644
--- a/src/mongo/db/exec/count_scan.cpp
+++ b/src/mongo/db/exec/count_scan.cpp
@@ -68,12 +68,12 @@ using stdx::make_unique;
// static
const char* CountScan::kStageType = "COUNT_SCAN";
-CountScan::CountScan(OperationContext* txn, const CountScanParams& params, WorkingSet* workingSet)
- : PlanStage(kStageType, txn),
+CountScan::CountScan(OperationContext* opCtx, const CountScanParams& params, WorkingSet* workingSet)
+ : PlanStage(kStageType, opCtx),
_workingSet(workingSet),
_descriptor(params.descriptor),
_iam(params.descriptor->getIndexCatalog()->getIndex(params.descriptor)),
- _shouldDedup(params.descriptor->isMultikey(txn)),
+ _shouldDedup(params.descriptor->isMultikey(opCtx)),
_params(params) {
_specificStats.keyPattern = _params.descriptor->keyPattern();
if (BSONElement collationElement = _params.descriptor->getInfoElement("collation")) {
@@ -81,8 +81,8 @@ CountScan::CountScan(OperationContext* txn, const CountScanParams& params, Worki
_specificStats.collation = collationElement.Obj().getOwned();
}
_specificStats.indexName = _params.descriptor->indexName();
- _specificStats.isMultiKey = _params.descriptor->isMultikey(txn);
- _specificStats.multiKeyPaths = _params.descriptor->getMultikeyPaths(txn);
+ _specificStats.isMultiKey = _params.descriptor->isMultikey(opCtx);
+ _specificStats.multiKeyPaths = _params.descriptor->getMultikeyPaths(opCtx);
_specificStats.isUnique = _params.descriptor->unique();
_specificStats.isSparse = _params.descriptor->isSparse();
_specificStats.isPartial = _params.descriptor->isPartial();
@@ -170,7 +170,7 @@ void CountScan::doReattachToOperationContext() {
_cursor->reattachToOperationContext(getOpCtx());
}
-void CountScan::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+void CountScan::doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {
// The only state we're responsible for holding is what RecordIds to drop. If a document
// mutates the underlying index cursor will deal with it.
if (INVALIDATION_MUTATION == type) {
diff --git a/src/mongo/db/exec/count_scan.h b/src/mongo/db/exec/count_scan.h
index f53c5e3ab0f..d4eb4656133 100644
--- a/src/mongo/db/exec/count_scan.h
+++ b/src/mongo/db/exec/count_scan.h
@@ -67,7 +67,7 @@ struct CountScanParams {
*/
class CountScan final : public PlanStage {
public:
- CountScan(OperationContext* txn, const CountScanParams& params, WorkingSet* workingSet);
+ CountScan(OperationContext* opCtx, const CountScanParams& params, WorkingSet* workingSet);
StageState doWork(WorkingSetID* out) final;
bool isEOF() final;
@@ -75,7 +75,7 @@ public:
void doRestoreState() final;
void doDetachFromOperationContext() final;
void doReattachToOperationContext() final;
- void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
+ void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
StageType stageType() const final {
return STAGE_COUNT_SCAN;
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index 884e5f01538..3aa9e89dc93 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -71,12 +71,12 @@ bool shouldRestartDeleteIfNoLongerMatches(const DeleteStageParams& params) {
// static
const char* DeleteStage::kStageType = "DELETE";
-DeleteStage::DeleteStage(OperationContext* txn,
+DeleteStage::DeleteStage(OperationContext* opCtx,
const DeleteStageParams& params,
WorkingSet* ws,
Collection* collection,
PlanStage* child)
- : PlanStage(kStageType, txn),
+ : PlanStage(kStageType, opCtx),
_params(params),
_ws(ws),
_collection(collection),
diff --git a/src/mongo/db/exec/delete.h b/src/mongo/db/exec/delete.h
index 5c2435a2122..dff707e89b3 100644
--- a/src/mongo/db/exec/delete.h
+++ b/src/mongo/db/exec/delete.h
@@ -84,7 +84,7 @@ class DeleteStage final : public PlanStage {
MONGO_DISALLOW_COPYING(DeleteStage);
public:
- DeleteStage(OperationContext* txn,
+ DeleteStage(OperationContext* opCtx,
const DeleteStageParams& params,
WorkingSet* ws,
Collection* collection,
diff --git a/src/mongo/db/exec/distinct_scan.cpp b/src/mongo/db/exec/distinct_scan.cpp
index dbf53236c32..380bcf3474b 100644
--- a/src/mongo/db/exec/distinct_scan.cpp
+++ b/src/mongo/db/exec/distinct_scan.cpp
@@ -45,10 +45,10 @@ using stdx::make_unique;
// static
const char* DistinctScan::kStageType = "DISTINCT_SCAN";
-DistinctScan::DistinctScan(OperationContext* txn,
+DistinctScan::DistinctScan(OperationContext* opCtx,
const DistinctParams& params,
WorkingSet* workingSet)
- : PlanStage(kStageType, txn),
+ : PlanStage(kStageType, opCtx),
_workingSet(workingSet),
_descriptor(params.descriptor),
_iam(params.descriptor->getIndexCatalog()->getIndex(params.descriptor)),
diff --git a/src/mongo/db/exec/distinct_scan.h b/src/mongo/db/exec/distinct_scan.h
index a82815a3793..d87090dcfd6 100644
--- a/src/mongo/db/exec/distinct_scan.h
+++ b/src/mongo/db/exec/distinct_scan.h
@@ -74,7 +74,7 @@ struct DistinctParams {
*/
class DistinctScan final : public PlanStage {
public:
- DistinctScan(OperationContext* txn, const DistinctParams& params, WorkingSet* workingSet);
+ DistinctScan(OperationContext* opCtx, const DistinctParams& params, WorkingSet* workingSet);
StageState doWork(WorkingSetID* out) final;
bool isEOF() final;
diff --git a/src/mongo/db/exec/fetch.cpp b/src/mongo/db/exec/fetch.cpp
index d2309d1217b..d42a86a8af0 100644
--- a/src/mongo/db/exec/fetch.cpp
+++ b/src/mongo/db/exec/fetch.cpp
@@ -49,12 +49,12 @@ using stdx::make_unique;
// static
const char* FetchStage::kStageType = "FETCH";
-FetchStage::FetchStage(OperationContext* txn,
+FetchStage::FetchStage(OperationContext* opCtx,
WorkingSet* ws,
PlanStage* child,
const MatchExpression* filter,
const Collection* collection)
- : PlanStage(kStageType, txn),
+ : PlanStage(kStageType, opCtx),
_collection(collection),
_ws(ws),
_filter(filter),
@@ -170,14 +170,14 @@ void FetchStage::doReattachToOperationContext() {
_cursor->reattachToOperationContext(getOpCtx());
}
-void FetchStage::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+void FetchStage::doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {
// It's possible that the recordId getting invalidated is the one we're about to
// fetch. In this case we do a "forced fetch" and put the WSM in owned object state.
if (WorkingSet::INVALID_ID != _idRetrying) {
WorkingSetMember* member = _ws->get(_idRetrying);
if (member->hasRecordId() && (member->recordId == dl)) {
// Fetch it now and kill the recordId.
- WorkingSetCommon::fetchAndInvalidateRecordId(txn, member, _collection);
+ WorkingSetCommon::fetchAndInvalidateRecordId(opCtx, member, _collection);
}
}
}
diff --git a/src/mongo/db/exec/fetch.h b/src/mongo/db/exec/fetch.h
index 981a2f812c8..a1c78c970c6 100644
--- a/src/mongo/db/exec/fetch.h
+++ b/src/mongo/db/exec/fetch.h
@@ -49,7 +49,7 @@ class SeekableRecordCursor;
*/
class FetchStage : public PlanStage {
public:
- FetchStage(OperationContext* txn,
+ FetchStage(OperationContext* opCtx,
WorkingSet* ws,
PlanStage* child,
const MatchExpression* filter,
@@ -64,7 +64,7 @@ public:
void doRestoreState() final;
void doDetachFromOperationContext() final;
void doReattachToOperationContext() final;
- void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
+ void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
StageType stageType() const final {
return STAGE_FETCH;
diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp
index 744e2e6fc0b..800a4ee76b2 100644
--- a/src/mongo/db/exec/geo_near.cpp
+++ b/src/mongo/db/exec/geo_near.cpp
@@ -288,14 +288,14 @@ public:
_currentLevel = std::max(0u, hashParams.bits - 1u);
}
- PlanStage::StageState work(OperationContext* txn,
+ PlanStage::StageState work(OperationContext* opCtx,
WorkingSet* workingSet,
Collection* collection,
WorkingSetID* out,
double* estimatedDistance);
private:
- void buildIndexScan(OperationContext* txn, WorkingSet* workingSet, Collection* collection);
+ void buildIndexScan(OperationContext* opCtx, WorkingSet* workingSet, Collection* collection);
PlanStage::Children* _children; // Points to PlanStage::_children in the NearStage.
const IndexDescriptor* _twoDIndex; // Not owned here.
@@ -308,7 +308,7 @@ private:
};
// Initialize the internal states
-void GeoNear2DStage::DensityEstimator::buildIndexScan(OperationContext* txn,
+void GeoNear2DStage::DensityEstimator::buildIndexScan(OperationContext* opCtx,
WorkingSet* workingSet,
Collection* collection) {
IndexScanParams scanParams;
@@ -348,20 +348,20 @@ void GeoNear2DStage::DensityEstimator::buildIndexScan(OperationContext* txn,
IndexBoundsBuilder::intersectize(oil, &scanParams.bounds.fields[twoDFieldPosition]);
invariant(!_indexScan);
- _indexScan = new IndexScan(txn, scanParams, workingSet, NULL);
+ _indexScan = new IndexScan(opCtx, scanParams, workingSet, NULL);
_children->emplace_back(_indexScan);
}
// Return IS_EOF is we find a document in it's ancestor cells and set estimated distance
// from the nearest document.
-PlanStage::StageState GeoNear2DStage::DensityEstimator::work(OperationContext* txn,
+PlanStage::StageState GeoNear2DStage::DensityEstimator::work(OperationContext* opCtx,
WorkingSet* workingSet,
Collection* collection,
WorkingSetID* out,
double* estimatedDistance) {
if (!_indexScan) {
// Setup index scan stage for current level.
- buildIndexScan(txn, workingSet, collection);
+ buildIndexScan(opCtx, workingSet, collection);
}
WorkingSetID workingSetID;
@@ -429,7 +429,7 @@ PlanStage::StageState GeoNear2DStage::DensityEstimator::work(OperationContext* t
return state;
}
-PlanStage::StageState GeoNear2DStage::initialize(OperationContext* txn,
+PlanStage::StageState GeoNear2DStage::initialize(OperationContext* opCtx,
WorkingSet* workingSet,
Collection* collection,
WorkingSetID* out) {
@@ -440,7 +440,7 @@ PlanStage::StageState GeoNear2DStage::initialize(OperationContext* txn,
double estimatedDistance;
PlanStage::StageState state =
- _densityEstimator->work(txn, workingSet, collection, out, &estimatedDistance);
+ _densityEstimator->work(opCtx, workingSet, collection, out, &estimatedDistance);
if (state == PlanStage::IS_EOF) {
// 2d index only works with legacy points as centroid. $nearSphere will project
@@ -476,11 +476,11 @@ PlanStage::StageState GeoNear2DStage::initialize(OperationContext* txn,
static const string kTwoDIndexNearStage("GEO_NEAR_2D");
GeoNear2DStage::GeoNear2DStage(const GeoNearParams& nearParams,
- OperationContext* txn,
+ OperationContext* opCtx,
WorkingSet* workingSet,
Collection* collection,
IndexDescriptor* twoDIndex)
- : NearStage(txn, kTwoDIndexNearStage.c_str(), STAGE_GEO_NEAR_2D, workingSet, collection),
+ : NearStage(opCtx, kTwoDIndexNearStage.c_str(), STAGE_GEO_NEAR_2D, workingSet, collection),
_nearParams(nearParams),
_twoDIndex(twoDIndex),
_fullBounds(twoDDistanceBounds(nearParams, twoDIndex)),
@@ -546,12 +546,12 @@ private:
// Helper class to maintain ownership of a match expression alongside an index scan
class FetchStageWithMatch final : public FetchStage {
public:
- FetchStageWithMatch(OperationContext* txn,
+ FetchStageWithMatch(OperationContext* opCtx,
WorkingSet* ws,
PlanStage* child,
MatchExpression* filter,
const Collection* collection)
- : FetchStage(txn, ws, child, filter, collection), _matcher(filter) {}
+ : FetchStage(opCtx, ws, child, filter, collection), _matcher(filter) {}
private:
// Owns matcher
@@ -591,7 +591,7 @@ static R2Annulus projectBoundsToTwoDDegrees(R2Annulus sphereBounds) {
}
StatusWith<NearStage::CoveredInterval*> //
- GeoNear2DStage::nextInterval(OperationContext* txn,
+ GeoNear2DStage::nextInterval(OperationContext* opCtx,
WorkingSet* workingSet,
Collection* collection) {
// The search is finished if we searched at least once and all the way to the edge
@@ -726,7 +726,7 @@ StatusWith<NearStage::CoveredInterval*> //
GeoHashConverter::parseParameters(_twoDIndex->infoObj(), &hashParams);
// 2D indexes support covered search over additional fields they contain
- IndexScan* scan = new IndexScan(txn, scanParams, workingSet, _nearParams.filter);
+ IndexScan* scan = new IndexScan(opCtx, scanParams, workingSet, _nearParams.filter);
MatchExpression* docMatcher = nullptr;
@@ -737,7 +737,8 @@ StatusWith<NearStage::CoveredInterval*> //
}
// FetchStage owns index scan
- _children.emplace_back(new FetchStageWithMatch(txn, workingSet, scan, docMatcher, collection));
+ _children.emplace_back(
+ new FetchStageWithMatch(opCtx, workingSet, scan, docMatcher, collection));
return StatusWith<CoveredInterval*>(new CoveredInterval(_children.back().get(),
true,
@@ -774,11 +775,11 @@ static int getFieldPosition(const IndexDescriptor* index, const string& fieldNam
static const string kS2IndexNearStage("GEO_NEAR_2DSPHERE");
GeoNear2DSphereStage::GeoNear2DSphereStage(const GeoNearParams& nearParams,
- OperationContext* txn,
+ OperationContext* opCtx,
WorkingSet* workingSet,
Collection* collection,
IndexDescriptor* s2Index)
- : NearStage(txn, kS2IndexNearStage.c_str(), STAGE_GEO_NEAR_2DSPHERE, workingSet, collection),
+ : NearStage(opCtx, kS2IndexNearStage.c_str(), STAGE_GEO_NEAR_2DSPHERE, workingSet, collection),
_nearParams(nearParams),
_s2Index(s2Index),
_fullBounds(geoNearDistanceBounds(*nearParams.nearQuery)),
@@ -861,14 +862,14 @@ public:
// Search for a document in neighbors at current level.
// Return IS_EOF is such document exists and set the estimated distance to the nearest doc.
- PlanStage::StageState work(OperationContext* txn,
+ PlanStage::StageState work(OperationContext* opCtx,
WorkingSet* workingSet,
Collection* collection,
WorkingSetID* out,
double* estimatedDistance);
private:
- void buildIndexScan(OperationContext* txn, WorkingSet* workingSet, Collection* collection);
+ void buildIndexScan(OperationContext* opCtx, WorkingSet* workingSet, Collection* collection);
PlanStage::Children* _children; // Points to PlanStage::_children in the NearStage.
const IndexDescriptor* _s2Index; // Not owned here.
@@ -880,7 +881,7 @@ private:
};
// Setup the index scan stage for neighbors at this level.
-void GeoNear2DSphereStage::DensityEstimator::buildIndexScan(OperationContext* txn,
+void GeoNear2DSphereStage::DensityEstimator::buildIndexScan(OperationContext* opCtx,
WorkingSet* workingSet,
Collection* collection) {
IndexScanParams scanParams;
@@ -909,18 +910,18 @@ void GeoNear2DSphereStage::DensityEstimator::buildIndexScan(OperationContext* tx
// Index scan
invariant(!_indexScan);
- _indexScan = new IndexScan(txn, scanParams, workingSet, NULL);
+ _indexScan = new IndexScan(opCtx, scanParams, workingSet, NULL);
_children->emplace_back(_indexScan);
}
-PlanStage::StageState GeoNear2DSphereStage::DensityEstimator::work(OperationContext* txn,
+PlanStage::StageState GeoNear2DSphereStage::DensityEstimator::work(OperationContext* opCtx,
WorkingSet* workingSet,
Collection* collection,
WorkingSetID* out,
double* estimatedDistance) {
if (!_indexScan) {
// Setup index scan stage for current level.
- buildIndexScan(txn, workingSet, collection);
+ buildIndexScan(opCtx, workingSet, collection);
}
WorkingSetID workingSetID;
@@ -991,7 +992,7 @@ PlanStage::StageState GeoNear2DSphereStage::DensityEstimator::work(OperationCont
}
-PlanStage::StageState GeoNear2DSphereStage::initialize(OperationContext* txn,
+PlanStage::StageState GeoNear2DSphereStage::initialize(OperationContext* opCtx,
WorkingSet* workingSet,
Collection* collection,
WorkingSetID* out) {
@@ -1002,7 +1003,7 @@ PlanStage::StageState GeoNear2DSphereStage::initialize(OperationContext* txn,
double estimatedDistance;
PlanStage::StageState state =
- _densityEstimator->work(txn, workingSet, collection, out, &estimatedDistance);
+ _densityEstimator->work(opCtx, workingSet, collection, out, &estimatedDistance);
if (state == IS_EOF) {
// We find a document in 4 neighbors at current level, but didn't at previous level.
@@ -1023,7 +1024,7 @@ PlanStage::StageState GeoNear2DSphereStage::initialize(OperationContext* txn,
}
StatusWith<NearStage::CoveredInterval*> //
- GeoNear2DSphereStage::nextInterval(OperationContext* txn,
+ GeoNear2DSphereStage::nextInterval(OperationContext* opCtx,
WorkingSet* workingSet,
Collection* collection) {
// The search is finished if we searched at least once and all the way to the edge
@@ -1093,10 +1094,10 @@ StatusWith<NearStage::CoveredInterval*> //
OrderedIntervalList* coveredIntervals = &scanParams.bounds.fields[s2FieldPosition];
ExpressionMapping::S2CellIdsToIntervalsWithParents(cover, _indexParams, coveredIntervals);
- IndexScan* scan = new IndexScan(txn, scanParams, workingSet, nullptr);
+ IndexScan* scan = new IndexScan(opCtx, scanParams, workingSet, nullptr);
// FetchStage owns index scan
- _children.emplace_back(new FetchStage(txn, workingSet, scan, _nearParams.filter, collection));
+ _children.emplace_back(new FetchStage(opCtx, workingSet, scan, _nearParams.filter, collection));
return StatusWith<CoveredInterval*>(new CoveredInterval(_children.back().get(),
true,
diff --git a/src/mongo/db/exec/geo_near.h b/src/mongo/db/exec/geo_near.h
index 5d58f616248..dcb0fefc1cd 100644
--- a/src/mongo/db/exec/geo_near.h
+++ b/src/mongo/db/exec/geo_near.h
@@ -66,19 +66,19 @@ struct GeoNearParams {
class GeoNear2DStage final : public NearStage {
public:
GeoNear2DStage(const GeoNearParams& nearParams,
- OperationContext* txn,
+ OperationContext* opCtx,
WorkingSet* workingSet,
Collection* collection,
IndexDescriptor* twoDIndex);
protected:
- StatusWith<CoveredInterval*> nextInterval(OperationContext* txn,
+ StatusWith<CoveredInterval*> nextInterval(OperationContext* opCtx,
WorkingSet* workingSet,
Collection* collection) final;
StatusWith<double> computeDistance(WorkingSetMember* member) final;
- PlanStage::StageState initialize(OperationContext* txn,
+ PlanStage::StageState initialize(OperationContext* opCtx,
WorkingSet* workingSet,
Collection* collection,
WorkingSetID* out) final;
@@ -112,7 +112,7 @@ private:
class GeoNear2DSphereStage final : public NearStage {
public:
GeoNear2DSphereStage(const GeoNearParams& nearParams,
- OperationContext* txn,
+ OperationContext* opCtx,
WorkingSet* workingSet,
Collection* collection,
IndexDescriptor* s2Index);
@@ -120,13 +120,13 @@ public:
~GeoNear2DSphereStage();
protected:
- StatusWith<CoveredInterval*> nextInterval(OperationContext* txn,
+ StatusWith<CoveredInterval*> nextInterval(OperationContext* opCtx,
WorkingSet* workingSet,
Collection* collection) final;
StatusWith<double> computeDistance(WorkingSetMember* member) final;
- PlanStage::StageState initialize(OperationContext* txn,
+ PlanStage::StageState initialize(OperationContext* opCtx,
WorkingSet* workingSet,
Collection* collection,
WorkingSetID* out) final;
diff --git a/src/mongo/db/exec/group.cpp b/src/mongo/db/exec/group.cpp
index 37ce67ad9fc..829aacb3937 100644
--- a/src/mongo/db/exec/group.cpp
+++ b/src/mongo/db/exec/group.cpp
@@ -76,11 +76,11 @@ Status getKey(
// static
const char* GroupStage::kStageType = "GROUP";
-GroupStage::GroupStage(OperationContext* txn,
+GroupStage::GroupStage(OperationContext* opCtx,
const GroupRequest& request,
WorkingSet* workingSet,
PlanStage* child)
- : PlanStage(kStageType, txn),
+ : PlanStage(kStageType, opCtx),
_request(request),
_ws(workingSet),
_specificStats(),
diff --git a/src/mongo/db/exec/group.h b/src/mongo/db/exec/group.h
index 95b8a788a1d..1c796a7391b 100644
--- a/src/mongo/db/exec/group.h
+++ b/src/mongo/db/exec/group.h
@@ -87,7 +87,7 @@ class GroupStage final : public PlanStage {
MONGO_DISALLOW_COPYING(GroupStage);
public:
- GroupStage(OperationContext* txn,
+ GroupStage(OperationContext* opCtx,
const GroupRequest& request,
WorkingSet* workingSet,
PlanStage* child);
diff --git a/src/mongo/db/exec/idhack.cpp b/src/mongo/db/exec/idhack.cpp
index fd392ac91ab..db24744f898 100644
--- a/src/mongo/db/exec/idhack.cpp
+++ b/src/mongo/db/exec/idhack.cpp
@@ -49,12 +49,12 @@ using stdx::make_unique;
// static
const char* IDHackStage::kStageType = "IDHACK";
-IDHackStage::IDHackStage(OperationContext* txn,
+IDHackStage::IDHackStage(OperationContext* opCtx,
const Collection* collection,
CanonicalQuery* query,
WorkingSet* ws,
const IndexDescriptor* descriptor)
- : PlanStage(kStageType, txn),
+ : PlanStage(kStageType, opCtx),
_collection(collection),
_workingSet(ws),
_key(query->getQueryObj()["_id"].wrap()),
@@ -71,12 +71,12 @@ IDHackStage::IDHackStage(OperationContext* txn,
}
}
-IDHackStage::IDHackStage(OperationContext* txn,
+IDHackStage::IDHackStage(OperationContext* opCtx,
Collection* collection,
const BSONObj& key,
WorkingSet* ws,
const IndexDescriptor* descriptor)
- : PlanStage(kStageType, txn),
+ : PlanStage(kStageType, opCtx),
_collection(collection),
_workingSet(ws),
_key(key),
@@ -208,7 +208,7 @@ void IDHackStage::doReattachToOperationContext() {
_recordCursor->reattachToOperationContext(getOpCtx());
}
-void IDHackStage::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+void IDHackStage::doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {
// Since updates can't mutate the '_id' field, we can ignore mutation invalidations.
if (INVALIDATION_MUTATION == type) {
return;
@@ -220,7 +220,7 @@ void IDHackStage::doInvalidate(OperationContext* txn, const RecordId& dl, Invali
WorkingSetMember* member = _workingSet->get(_idBeingPagedIn);
if (member->hasRecordId() && (member->recordId == dl)) {
// Fetch it now and kill the RecordId.
- WorkingSetCommon::fetchAndInvalidateRecordId(txn, member, _collection);
+ WorkingSetCommon::fetchAndInvalidateRecordId(opCtx, member, _collection);
}
}
}
diff --git a/src/mongo/db/exec/idhack.h b/src/mongo/db/exec/idhack.h
index ac2466a956b..efb3b4aace3 100644
--- a/src/mongo/db/exec/idhack.h
+++ b/src/mongo/db/exec/idhack.h
@@ -48,13 +48,13 @@ class RecordCursor;
class IDHackStage final : public PlanStage {
public:
/** Takes ownership of all the arguments -collection. */
- IDHackStage(OperationContext* txn,
+ IDHackStage(OperationContext* opCtx,
const Collection* collection,
CanonicalQuery* query,
WorkingSet* ws,
const IndexDescriptor* descriptor);
- IDHackStage(OperationContext* txn,
+ IDHackStage(OperationContext* opCtx,
Collection* collection,
const BSONObj& key,
WorkingSet* ws,
@@ -69,7 +69,7 @@ public:
void doRestoreState() final;
void doDetachFromOperationContext() final;
void doReattachToOperationContext() final;
- void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
+ void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
/**
* ID Hack has a very strict criteria for the queries it supports.
diff --git a/src/mongo/db/exec/index_iterator.cpp b/src/mongo/db/exec/index_iterator.cpp
index 683455cd519..54cb130d6d1 100644
--- a/src/mongo/db/exec/index_iterator.cpp
+++ b/src/mongo/db/exec/index_iterator.cpp
@@ -42,13 +42,13 @@ using stdx::make_unique;
const char* IndexIteratorStage::kStageType = "INDEX_ITERATOR";
-IndexIteratorStage::IndexIteratorStage(OperationContext* txn,
+IndexIteratorStage::IndexIteratorStage(OperationContext* opCtx,
WorkingSet* ws,
Collection* collection,
IndexAccessMethod* iam,
BSONObj keyPattern,
unique_ptr<SortedDataInterface::Cursor> cursor)
- : PlanStage(kStageType, txn),
+ : PlanStage(kStageType, opCtx),
_collection(collection),
_ws(ws),
_iam(iam),
diff --git a/src/mongo/db/exec/index_iterator.h b/src/mongo/db/exec/index_iterator.h
index a39b36c6ca5..3e989eb9aae 100644
--- a/src/mongo/db/exec/index_iterator.h
+++ b/src/mongo/db/exec/index_iterator.h
@@ -44,7 +44,7 @@ class Collection;
*/
class IndexIteratorStage final : public PlanStage {
public:
- IndexIteratorStage(OperationContext* txn,
+ IndexIteratorStage(OperationContext* opCtx,
WorkingSet* ws,
Collection* collection,
IndexAccessMethod* iam,
diff --git a/src/mongo/db/exec/index_scan.cpp b/src/mongo/db/exec/index_scan.cpp
index 767539e65b4..50e4ff56962 100644
--- a/src/mongo/db/exec/index_scan.cpp
+++ b/src/mongo/db/exec/index_scan.cpp
@@ -58,11 +58,11 @@ namespace mongo {
// static
const char* IndexScan::kStageType = "IXSCAN";
-IndexScan::IndexScan(OperationContext* txn,
+IndexScan::IndexScan(OperationContext* opCtx,
const IndexScanParams& params,
WorkingSet* workingSet,
const MatchExpression* filter)
- : PlanStage(kStageType, txn),
+ : PlanStage(kStageType, opCtx),
_workingSet(workingSet),
_iam(params.descriptor->getIndexCatalog()->getIndex(params.descriptor)),
_keyPattern(params.descriptor->keyPattern().getOwned()),
@@ -267,7 +267,7 @@ void IndexScan::doReattachToOperationContext() {
_indexCursor->reattachToOperationContext(getOpCtx());
}
-void IndexScan::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+void IndexScan::doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {
// The only state we're responsible for holding is what RecordIds to drop. If a document
// mutates the underlying index cursor will deal with it.
if (INVALIDATION_MUTATION == type) {
diff --git a/src/mongo/db/exec/index_scan.h b/src/mongo/db/exec/index_scan.h
index 1cf2780f24b..7dfdbd4753b 100644
--- a/src/mongo/db/exec/index_scan.h
+++ b/src/mongo/db/exec/index_scan.h
@@ -90,7 +90,7 @@ public:
HIT_END
};
- IndexScan(OperationContext* txn,
+ IndexScan(OperationContext* opCtx,
const IndexScanParams& params,
WorkingSet* workingSet,
const MatchExpression* filter);
@@ -101,7 +101,7 @@ public:
void doRestoreState() final;
void doDetachFromOperationContext() final;
void doReattachToOperationContext() final;
- void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
+ void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
StageType stageType() const final {
return STAGE_IXSCAN;
diff --git a/src/mongo/db/exec/merge_sort.cpp b/src/mongo/db/exec/merge_sort.cpp
index 372417e0463..7010546d1e4 100644
--- a/src/mongo/db/exec/merge_sort.cpp
+++ b/src/mongo/db/exec/merge_sort.cpp
@@ -174,7 +174,7 @@ PlanStage::StageState MergeSortStage::doWork(WorkingSetID* out) {
}
-void MergeSortStage::doInvalidate(OperationContext* txn,
+void MergeSortStage::doInvalidate(OperationContext* opCtx,
const RecordId& dl,
InvalidationType type) {
// Go through our data and see if we're holding on to the invalidated RecordId.
@@ -184,7 +184,7 @@ void MergeSortStage::doInvalidate(OperationContext* txn,
WorkingSetMember* member = _ws->get(valueIt->id);
if (member->hasRecordId() && (dl == member->recordId)) {
// Fetch the about-to-be mutated result.
- WorkingSetCommon::fetchAndInvalidateRecordId(txn, member, _collection);
+ WorkingSetCommon::fetchAndInvalidateRecordId(opCtx, member, _collection);
++_specificStats.forcedFetches;
}
}
diff --git a/src/mongo/db/exec/merge_sort.h b/src/mongo/db/exec/merge_sort.h
index e0ff8d40fee..6a2121a3bdf 100644
--- a/src/mongo/db/exec/merge_sort.h
+++ b/src/mongo/db/exec/merge_sort.h
@@ -66,7 +66,7 @@ public:
bool isEOF() final;
StageState doWork(WorkingSetID* out) final;
- void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
+ void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
StageType stageType() const final {
return STAGE_SORT_MERGE;
diff --git a/src/mongo/db/exec/multi_iterator.cpp b/src/mongo/db/exec/multi_iterator.cpp
index 49129451e42..24b8d375a67 100644
--- a/src/mongo/db/exec/multi_iterator.cpp
+++ b/src/mongo/db/exec/multi_iterator.cpp
@@ -43,10 +43,10 @@ using stdx::make_unique;
const char* MultiIteratorStage::kStageType = "MULTI_ITERATOR";
-MultiIteratorStage::MultiIteratorStage(OperationContext* txn,
+MultiIteratorStage::MultiIteratorStage(OperationContext* opCtx,
WorkingSet* ws,
Collection* collection)
- : PlanStage(kStageType, txn),
+ : PlanStage(kStageType, opCtx),
_collection(collection),
_ws(ws),
_wsidForFetch(_ws->allocate()) {}
@@ -131,13 +131,13 @@ void MultiIteratorStage::doReattachToOperationContext() {
}
}
-void MultiIteratorStage::doInvalidate(OperationContext* txn,
+void MultiIteratorStage::doInvalidate(OperationContext* opCtx,
const RecordId& dl,
InvalidationType type) {
switch (type) {
case INVALIDATION_DELETION:
for (size_t i = 0; i < _iterators.size(); i++) {
- _iterators[i]->invalidate(txn, dl);
+ _iterators[i]->invalidate(opCtx, dl);
}
break;
case INVALIDATION_MUTATION:
diff --git a/src/mongo/db/exec/multi_iterator.h b/src/mongo/db/exec/multi_iterator.h
index ec9ff88c11b..4b446ba17da 100644
--- a/src/mongo/db/exec/multi_iterator.h
+++ b/src/mongo/db/exec/multi_iterator.h
@@ -47,7 +47,7 @@ namespace mongo {
*/
class MultiIteratorStage final : public PlanStage {
public:
- MultiIteratorStage(OperationContext* txn, WorkingSet* ws, Collection* collection);
+ MultiIteratorStage(OperationContext* opCtx, WorkingSet* ws, Collection* collection);
void addIterator(std::unique_ptr<RecordCursor> it);
@@ -61,7 +61,7 @@ public:
void doRestoreState() final;
void doDetachFromOperationContext() final;
void doReattachToOperationContext() final;
- void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
+ void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
// Returns empty PlanStageStats object
std::unique_ptr<PlanStageStats> getStats() final;
@@ -79,7 +79,7 @@ public:
static const char* kStageType;
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
Collection* _collection;
std::vector<std::unique_ptr<RecordCursor>> _iterators;
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index ba18803a2ac..d4d9d21ca47 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -61,11 +61,11 @@ using stdx::make_unique;
// static
const char* MultiPlanStage::kStageType = "MULTI_PLAN";
-MultiPlanStage::MultiPlanStage(OperationContext* txn,
+MultiPlanStage::MultiPlanStage(OperationContext* opCtx,
const Collection* collection,
CanonicalQuery* cq,
CachingMode cachingMode)
- : PlanStage(kStageType, txn),
+ : PlanStage(kStageType, opCtx),
_collection(collection),
_cachingMode(cachingMode),
_query(cq),
@@ -169,7 +169,7 @@ Status MultiPlanStage::tryYield(PlanYieldPolicy* yieldPolicy) {
}
// static
-size_t MultiPlanStage::getTrialPeriodWorks(OperationContext* txn, const Collection* collection) {
+size_t MultiPlanStage::getTrialPeriodWorks(OperationContext* opCtx, const Collection* collection) {
// Run each plan some number of times. This number is at least as great as
// 'internalQueryPlanEvaluationWorks', but may be larger for big collections.
size_t numWorks = internalQueryPlanEvaluationWorks.load();
@@ -179,7 +179,7 @@ size_t MultiPlanStage::getTrialPeriodWorks(OperationContext* txn, const Collecti
double fraction = internalQueryPlanEvaluationCollFraction;
numWorks = std::max(static_cast<size_t>(internalQueryPlanEvaluationWorks.load()),
- static_cast<size_t>(fraction * collection->numRecords(txn)));
+ static_cast<size_t>(fraction * collection->numRecords(opCtx)));
}
return numWorks;
@@ -405,7 +405,7 @@ bool MultiPlanStage::workAllPlans(size_t numResults, PlanYieldPolicy* yieldPolic
namespace {
-void invalidateHelper(OperationContext* txn,
+void invalidateHelper(OperationContext* opCtx,
WorkingSet* ws, // may flag for review
const RecordId& recordId,
list<WorkingSetID>* idsToInvalidate,
@@ -413,14 +413,14 @@ void invalidateHelper(OperationContext* txn,
for (auto it = idsToInvalidate->begin(); it != idsToInvalidate->end(); ++it) {
WorkingSetMember* member = ws->get(*it);
if (member->hasRecordId() && member->recordId == recordId) {
- WorkingSetCommon::fetchAndInvalidateRecordId(txn, member, collection);
+ WorkingSetCommon::fetchAndInvalidateRecordId(opCtx, member, collection);
}
}
}
} // namespace
-void MultiPlanStage::doInvalidate(OperationContext* txn,
+void MultiPlanStage::doInvalidate(OperationContext* opCtx,
const RecordId& recordId,
InvalidationType type) {
if (_failure) {
@@ -429,15 +429,15 @@ void MultiPlanStage::doInvalidate(OperationContext* txn,
if (bestPlanChosen()) {
CandidatePlan& bestPlan = _candidates[_bestPlanIdx];
- invalidateHelper(txn, bestPlan.ws, recordId, &bestPlan.results, _collection);
+ invalidateHelper(opCtx, bestPlan.ws, recordId, &bestPlan.results, _collection);
if (hasBackupPlan()) {
CandidatePlan& backupPlan = _candidates[_backupPlanIdx];
- invalidateHelper(txn, backupPlan.ws, recordId, &backupPlan.results, _collection);
+ invalidateHelper(opCtx, backupPlan.ws, recordId, &backupPlan.results, _collection);
}
} else {
for (size_t ix = 0; ix < _candidates.size(); ++ix) {
invalidateHelper(
- txn, _candidates[ix].ws, recordId, &_candidates[ix].results, _collection);
+ opCtx, _candidates[ix].ws, recordId, &_candidates[ix].results, _collection);
}
}
}
diff --git a/src/mongo/db/exec/multi_plan.h b/src/mongo/db/exec/multi_plan.h
index 7eca99808db..258e54642ba 100644
--- a/src/mongo/db/exec/multi_plan.h
+++ b/src/mongo/db/exec/multi_plan.h
@@ -75,7 +75,7 @@ public:
* If 'shouldCache' is true, writes a cache entry for the winning plan to the plan cache
* when possible. If 'shouldCache' is false, the plan cache will never be written.
*/
- MultiPlanStage(OperationContext* txn,
+ MultiPlanStage(OperationContext* opCtx,
const Collection* collection,
CanonicalQuery* cq,
CachingMode cachingMode = CachingMode::AlwaysCache);
@@ -84,7 +84,7 @@ public:
StageState doWork(WorkingSetID* out) final;
- void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
+ void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
StageType stageType() const final {
return STAGE_MULTI_PLAN;
@@ -118,7 +118,7 @@ public:
*
* Calculated based on a fixed query knob and the size of the collection.
*/
- static size_t getTrialPeriodWorks(OperationContext* txn, const Collection* collection);
+ static size_t getTrialPeriodWorks(OperationContext* opCtx, const Collection* collection);
/**
* Returns the max number of documents which we should allow any plan to return during the
diff --git a/src/mongo/db/exec/near.cpp b/src/mongo/db/exec/near.cpp
index e3f0ed7781d..31825995c0e 100644
--- a/src/mongo/db/exec/near.cpp
+++ b/src/mongo/db/exec/near.cpp
@@ -41,12 +41,12 @@ using std::unique_ptr;
using std::vector;
using stdx::make_unique;
-NearStage::NearStage(OperationContext* txn,
+NearStage::NearStage(OperationContext* opCtx,
const char* typeName,
StageType type,
WorkingSet* workingSet,
Collection* collection)
- : PlanStage(typeName, txn),
+ : PlanStage(typeName, opCtx),
_workingSet(workingSet),
_collection(collection),
_searchState(SearchState_Initializing),
@@ -283,7 +283,7 @@ bool NearStage::isEOF() {
return SearchState_Finished == _searchState;
}
-void NearStage::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+void NearStage::doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {
// If a result is in _resultBuffer and has a RecordId it will be in _seenDocuments as
// well. It's safe to return the result w/o the RecordId, so just fetch the result.
unordered_map<RecordId, WorkingSetID, RecordId::Hasher>::iterator seenIt =
@@ -292,7 +292,7 @@ void NearStage::doInvalidate(OperationContext* txn, const RecordId& dl, Invalida
if (seenIt != _seenDocuments.end()) {
WorkingSetMember* member = _workingSet->get(seenIt->second);
verify(member->hasRecordId());
- WorkingSetCommon::fetchAndInvalidateRecordId(txn, member, _collection);
+ WorkingSetCommon::fetchAndInvalidateRecordId(opCtx, member, _collection);
verify(!member->hasRecordId());
// Don't keep it around in the seen map since there's no valid RecordId anymore
diff --git a/src/mongo/db/exec/near.h b/src/mongo/db/exec/near.h
index d9ccdc4b92a..8ba21895baf 100644
--- a/src/mongo/db/exec/near.h
+++ b/src/mongo/db/exec/near.h
@@ -100,7 +100,7 @@ public:
bool isEOF() final;
StageState doWork(WorkingSetID* out) final;
- void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
+ void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
StageType stageType() const final;
std::unique_ptr<PlanStageStats> getStats() final;
@@ -110,7 +110,7 @@ protected:
/**
* Subclasses of NearStage must provide basics + a stats object which gets owned here.
*/
- NearStage(OperationContext* txn,
+ NearStage(OperationContext* opCtx,
const char* typeName,
StageType type,
WorkingSet* workingSet,
@@ -127,7 +127,7 @@ protected:
*
* Returns !OK on failure to create next stage.
*/
- virtual StatusWith<CoveredInterval*> nextInterval(OperationContext* txn,
+ virtual StatusWith<CoveredInterval*> nextInterval(OperationContext* opCtx,
WorkingSet* workingSet,
Collection* collection) = 0;
@@ -146,7 +146,7 @@ protected:
* Return errors if an error occurs.
* Can't return ADVANCED.
*/
- virtual StageState initialize(OperationContext* txn,
+ virtual StageState initialize(OperationContext* opCtx,
WorkingSet* workingSet,
Collection* collection,
WorkingSetID* out) = 0;
diff --git a/src/mongo/db/exec/oplogstart.cpp b/src/mongo/db/exec/oplogstart.cpp
index 3b96dcac89d..8a8480ce136 100644
--- a/src/mongo/db/exec/oplogstart.cpp
+++ b/src/mongo/db/exec/oplogstart.cpp
@@ -43,11 +43,11 @@ using stdx::make_unique;
const char* OplogStart::kStageType = "OPLOG_START";
// Does not take ownership.
-OplogStart::OplogStart(OperationContext* txn,
+OplogStart::OplogStart(OperationContext* opCtx,
const Collection* collection,
MatchExpression* filter,
WorkingSet* ws)
- : PlanStage(kStageType, txn),
+ : PlanStage(kStageType, opCtx),
_needInit(true),
_backwardsScanning(false),
_extentHopping(false),
@@ -165,7 +165,7 @@ bool OplogStart::isEOF() {
return _done;
}
-void OplogStart::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+void OplogStart::doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {
if (_needInit) {
return;
}
@@ -175,7 +175,7 @@ void OplogStart::doInvalidate(OperationContext* txn, const RecordId& dl, Invalid
}
for (size_t i = 0; i < _subIterators.size(); i++) {
- _subIterators[i]->invalidate(txn, dl);
+ _subIterators[i]->invalidate(opCtx, dl);
}
}
diff --git a/src/mongo/db/exec/oplogstart.h b/src/mongo/db/exec/oplogstart.h
index 4fd5df13eb8..9e4df5acd77 100644
--- a/src/mongo/db/exec/oplogstart.h
+++ b/src/mongo/db/exec/oplogstart.h
@@ -63,7 +63,7 @@ class RecordCursor;
class OplogStart final : public PlanStage {
public:
// Does not take ownership.
- OplogStart(OperationContext* txn,
+ OplogStart(OperationContext* opCtx,
const Collection* collection,
MatchExpression* filter,
WorkingSet* ws);
@@ -71,7 +71,7 @@ public:
StageState doWork(WorkingSetID* out) final;
bool isEOF() final;
- void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
+ void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
void doSaveState() final;
void doRestoreState() final;
void doDetachFromOperationContext() final;
diff --git a/src/mongo/db/exec/or.cpp b/src/mongo/db/exec/or.cpp
index f9d51880c2f..224a7e47112 100644
--- a/src/mongo/db/exec/or.cpp
+++ b/src/mongo/db/exec/or.cpp
@@ -120,7 +120,7 @@ PlanStage::StageState OrStage::doWork(WorkingSetID* out) {
return childStatus;
}
-void OrStage::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+void OrStage::doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {
// TODO remove this since calling isEOF is illegal inside of doInvalidate().
if (isEOF()) {
return;
diff --git a/src/mongo/db/exec/or.h b/src/mongo/db/exec/or.h
index ea056d7db91..c97f9d34909 100644
--- a/src/mongo/db/exec/or.h
+++ b/src/mongo/db/exec/or.h
@@ -53,7 +53,7 @@ public:
StageState doWork(WorkingSetID* out) final;
- void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
+ void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
StageType stageType() const final {
return STAGE_OR;
diff --git a/src/mongo/db/exec/plan_stage.cpp b/src/mongo/db/exec/plan_stage.cpp
index 13062fa919f..c532242264e 100644
--- a/src/mongo/db/exec/plan_stage.cpp
+++ b/src/mongo/db/exec/plan_stage.cpp
@@ -74,13 +74,13 @@ void PlanStage::restoreState() {
doRestoreState();
}
-void PlanStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+void PlanStage::invalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {
++_commonStats.invalidates;
for (auto&& child : _children) {
- child->invalidate(txn, dl, type);
+ child->invalidate(opCtx, dl, type);
}
- doInvalidate(txn, dl, type);
+ doInvalidate(opCtx, dl, type);
}
void PlanStage::detachFromOperationContext() {
diff --git a/src/mongo/db/exec/plan_stage.h b/src/mongo/db/exec/plan_stage.h
index c0afd992e72..6b3a67a2c04 100644
--- a/src/mongo/db/exec/plan_stage.h
+++ b/src/mongo/db/exec/plan_stage.h
@@ -259,7 +259,7 @@ public:
*
* Propagates to all children, then calls doInvalidate().
*/
- void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ void invalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type);
/**
* Retrieve a list of this stage's children. This stage keeps ownership of
@@ -356,7 +356,7 @@ protected:
/**
* Does the stage-specific invalidation work.
*/
- virtual void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {}
+ virtual void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {}
ClockSource* getClock() const;
diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp
index 2fd40f0efb2..bd2cc27e14f 100644
--- a/src/mongo/db/exec/sort.cpp
+++ b/src/mongo/db/exec/sort.cpp
@@ -193,7 +193,7 @@ PlanStage::StageState SortStage::doWork(WorkingSetID* out) {
return PlanStage::ADVANCED;
}
-void SortStage::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+void SortStage::doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {
// If we have a deletion, we can fetch and carry on.
// If we have a mutation, it's easier to fetch and use the previous document.
// So, no matter what, fetch and keep the doc in play.
@@ -209,7 +209,7 @@ void SortStage::doInvalidate(OperationContext* txn, const RecordId& dl, Invalida
WorkingSetMember* member = _ws->get(it->second);
verify(member->recordId == dl);
- WorkingSetCommon::fetchAndInvalidateRecordId(txn, member, _collection);
+ WorkingSetCommon::fetchAndInvalidateRecordId(opCtx, member, _collection);
// Remove the RecordId from our set of active DLs.
_wsidByRecordId.erase(it);
diff --git a/src/mongo/db/exec/sort.h b/src/mongo/db/exec/sort.h
index 4cf91604025..b65f5d4b824 100644
--- a/src/mongo/db/exec/sort.h
+++ b/src/mongo/db/exec/sort.h
@@ -77,7 +77,7 @@ public:
bool isEOF() final;
StageState doWork(WorkingSetID* out) final;
- void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
+ void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
StageType stageType() const final {
return STAGE_SORT;
diff --git a/src/mongo/db/exec/sort_key_generator.cpp b/src/mongo/db/exec/sort_key_generator.cpp
index 76e6ba70634..bdd37af66aa 100644
--- a/src/mongo/db/exec/sort_key_generator.cpp
+++ b/src/mongo/db/exec/sort_key_generator.cpp
@@ -52,7 +52,7 @@ namespace mongo {
// SortKeyGenerator
//
-SortKeyGenerator::SortKeyGenerator(OperationContext* txn,
+SortKeyGenerator::SortKeyGenerator(OperationContext* opCtx,
const BSONObj& sortSpec,
const BSONObj& queryObj,
const CollatorInterface* collator)
@@ -105,7 +105,7 @@ SortKeyGenerator::SortKeyGenerator(OperationContext* txn,
_keyGen.reset(new BtreeKeyGeneratorV1(fieldNames, fixed, false /* not sparse */, _collator));
// The bounds checker only works on the Btree part of the sort key.
- getBoundsForSort(txn, queryObj, _btreeObj);
+ getBoundsForSort(opCtx, queryObj, _btreeObj);
if (_hasBounds) {
_boundsChecker.reset(new IndexBoundsChecker(&_bounds, _btreeObj, 1 /* == order */));
@@ -227,7 +227,7 @@ StatusWith<BSONObj> SortKeyGenerator::getSortKeyFromObject(const WorkingSetMembe
return *keys.begin();
}
-void SortKeyGenerator::getBoundsForSort(OperationContext* txn,
+void SortKeyGenerator::getBoundsForSort(OperationContext* opCtx,
const BSONObj& queryObj,
const BSONObj& sortObj) {
QueryPlannerParams params;
@@ -254,7 +254,7 @@ void SortKeyGenerator::getBoundsForSort(OperationContext* txn,
}
auto statusWithQueryForSort =
- CanonicalQuery::canonicalize(txn, std::move(qr), ExtensionsCallbackNoop());
+ CanonicalQuery::canonicalize(opCtx, std::move(qr), ExtensionsCallbackNoop());
verify(statusWithQueryForSort.isOK());
std::unique_ptr<CanonicalQuery> queryForSort = std::move(statusWithQueryForSort.getValue());
diff --git a/src/mongo/db/exec/sort_key_generator.h b/src/mongo/db/exec/sort_key_generator.h
index f7684a038c3..d284bad443d 100644
--- a/src/mongo/db/exec/sort_key_generator.h
+++ b/src/mongo/db/exec/sort_key_generator.h
@@ -54,10 +54,10 @@ public:
* ensure that the value we select to sort by is within bounds generated by
* executing 'queryObj' using the virtual index with key pattern 'sortSpec'.
*
- * 'txn' must point to a valid OperationContext, but 'txn' does not need to outlive the
+ * 'opCtx' must point to a valid OperationContext, but 'opCtx' does not need to outlive the
* constructed SortKeyGenerator.
*/
- SortKeyGenerator(OperationContext* txn,
+ SortKeyGenerator(OperationContext* opCtx,
const BSONObj& sortSpec,
const BSONObj& queryObj,
const CollatorInterface* collator);
@@ -83,7 +83,7 @@ private:
*
* Populates _hasBounds and _bounds.
*/
- void getBoundsForSort(OperationContext* txn, const BSONObj& queryObj, const BSONObj& sortObj);
+ void getBoundsForSort(OperationContext* opCtx, const BSONObj& queryObj, const BSONObj& sortObj);
const CollatorInterface* _collator;
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 926cca4c485..72949a80598 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -137,7 +137,7 @@ public:
// check needed.
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -164,8 +164,8 @@ public:
// TODO A write lock is currently taken here to accommodate stages that perform writes
// (e.g. DeleteStage). This should be changed to use a read lock for read-only
// execution trees.
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetCollection autoColl(txn, nss, MODE_IX);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ AutoGetCollection autoColl(opCtx, nss, MODE_IX);
// Make sure the collection is valid.
Collection* collection = autoColl.getCollection();
@@ -184,16 +184,16 @@ public:
OwnedPointerVector<MatchExpression> exprs;
unique_ptr<WorkingSet> ws(new WorkingSet());
- PlanStage* userRoot = parseQuery(txn, collection, planObj, ws.get(), &exprs);
+ PlanStage* userRoot = parseQuery(opCtx, collection, planObj, ws.get(), &exprs);
uassert(16911, "Couldn't parse plan from " + cmdObj.toString(), NULL != userRoot);
// Add a fetch at the top for the user so we can get obj back for sure.
// TODO: Do we want to do this for the user? I think so.
unique_ptr<PlanStage> rootFetch =
- make_unique<FetchStage>(txn, ws.get(), userRoot, nullptr, collection);
+ make_unique<FetchStage>(opCtx, ws.get(), userRoot, nullptr, collection);
auto statusWithPlanExecutor = PlanExecutor::make(
- txn, std::move(ws), std::move(rootFetch), collection, PlanExecutor::YIELD_AUTO);
+ opCtx, std::move(ws), std::move(rootFetch), collection, PlanExecutor::YIELD_AUTO);
fassert(28536, statusWithPlanExecutor.getStatus());
std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
@@ -223,7 +223,7 @@ public:
return true;
}
- PlanStage* parseQuery(OperationContext* txn,
+ PlanStage* parseQuery(OperationContext* opCtx,
Collection* collection,
BSONObj obj,
WorkingSet* workingSet,
@@ -251,7 +251,7 @@ public:
if (filterTag == e.fieldName()) {
const CollatorInterface* collator = nullptr;
StatusWithMatchExpression statusWithMatcher = MatchExpressionParser::parse(
- argObj, ExtensionsCallbackReal(txn, &collection->ns()), collator);
+ argObj, ExtensionsCallbackReal(opCtx, &collection->ns()), collator);
if (!statusWithMatcher.isOK()) {
return NULL;
}
@@ -279,7 +279,7 @@ public:
BSONObj keyPatternObj = keyPatternElement.Obj();
std::vector<IndexDescriptor*> indexes;
collection->getIndexCatalog()->findIndexesByKeyPattern(
- txn, keyPatternObj, false, &indexes);
+ opCtx, keyPatternObj, false, &indexes);
uassert(16890,
str::stream() << "Can't find index: " << keyPatternObj,
!indexes.empty());
@@ -297,7 +297,7 @@ public:
str::stream() << "Index 'name' must be a string in: " << nodeArgs,
nodeArgs["name"].type() == BSONType::String);
StringData name = nodeArgs["name"].valueStringData();
- desc = collection->getIndexCatalog()->findIndexByName(txn, name);
+ desc = collection->getIndexCatalog()->findIndexByName(opCtx, name);
uassert(40223, str::stream() << "Can't find index: " << name.toString(), desc);
}
@@ -310,12 +310,12 @@ public:
nodeArgs["startKeyInclusive"].Bool(), nodeArgs["endKeyInclusive"].Bool());
params.direction = nodeArgs["direction"].numberInt();
- return new IndexScan(txn, params, workingSet, matcher);
+ return new IndexScan(opCtx, params, workingSet, matcher);
} else if ("andHash" == nodeName) {
uassert(
16921, "Nodes argument must be provided to AND", nodeArgs["nodes"].isABSONObj());
- auto andStage = make_unique<AndHashStage>(txn, workingSet, collection);
+ auto andStage = make_unique<AndHashStage>(opCtx, workingSet, collection);
int nodesAdded = 0;
BSONObjIterator it(nodeArgs["nodes"].Obj());
@@ -323,7 +323,7 @@ public:
BSONElement e = it.next();
uassert(16922, "node of AND isn't an obj?: " + e.toString(), e.isABSONObj());
- PlanStage* subNode = parseQuery(txn, collection, e.Obj(), workingSet, exprs);
+ PlanStage* subNode = parseQuery(opCtx, collection, e.Obj(), workingSet, exprs);
uassert(
16923, "Can't parse sub-node of AND: " + e.Obj().toString(), NULL != subNode);
// takes ownership
@@ -338,7 +338,7 @@ public:
uassert(
16924, "Nodes argument must be provided to AND", nodeArgs["nodes"].isABSONObj());
- auto andStage = make_unique<AndSortedStage>(txn, workingSet, collection);
+ auto andStage = make_unique<AndSortedStage>(opCtx, workingSet, collection);
int nodesAdded = 0;
BSONObjIterator it(nodeArgs["nodes"].Obj());
@@ -346,7 +346,7 @@ public:
BSONElement e = it.next();
uassert(16925, "node of AND isn't an obj?: " + e.toString(), e.isABSONObj());
- PlanStage* subNode = parseQuery(txn, collection, e.Obj(), workingSet, exprs);
+ PlanStage* subNode = parseQuery(opCtx, collection, e.Obj(), workingSet, exprs);
uassert(
16926, "Can't parse sub-node of AND: " + e.Obj().toString(), NULL != subNode);
// takes ownership
@@ -362,13 +362,14 @@ public:
16934, "Nodes argument must be provided to AND", nodeArgs["nodes"].isABSONObj());
uassert(16935, "Dedup argument must be provided to OR", !nodeArgs["dedup"].eoo());
BSONObjIterator it(nodeArgs["nodes"].Obj());
- auto orStage = make_unique<OrStage>(txn, workingSet, nodeArgs["dedup"].Bool(), matcher);
+ auto orStage =
+ make_unique<OrStage>(opCtx, workingSet, nodeArgs["dedup"].Bool(), matcher);
while (it.more()) {
BSONElement e = it.next();
if (!e.isABSONObj()) {
return NULL;
}
- PlanStage* subNode = parseQuery(txn, collection, e.Obj(), workingSet, exprs);
+ PlanStage* subNode = parseQuery(opCtx, collection, e.Obj(), workingSet, exprs);
uassert(
16936, "Can't parse sub-node of OR: " + e.Obj().toString(), NULL != subNode);
// takes ownership
@@ -380,11 +381,11 @@ public:
uassert(
16929, "Node argument must be provided to fetch", nodeArgs["node"].isABSONObj());
PlanStage* subNode =
- parseQuery(txn, collection, nodeArgs["node"].Obj(), workingSet, exprs);
+ parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, exprs);
uassert(28731,
"Can't parse sub-node of FETCH: " + nodeArgs["node"].Obj().toString(),
NULL != subNode);
- return new FetchStage(txn, workingSet, subNode, matcher, collection);
+ return new FetchStage(opCtx, workingSet, subNode, matcher, collection);
} else if ("limit" == nodeName) {
uassert(
16937, "Limit stage doesn't have a filter (put it on the child)", NULL == matcher);
@@ -392,22 +393,22 @@ public:
16930, "Node argument must be provided to limit", nodeArgs["node"].isABSONObj());
uassert(16931, "Num argument must be provided to limit", nodeArgs["num"].isNumber());
PlanStage* subNode =
- parseQuery(txn, collection, nodeArgs["node"].Obj(), workingSet, exprs);
+ parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, exprs);
uassert(28732,
"Can't parse sub-node of LIMIT: " + nodeArgs["node"].Obj().toString(),
NULL != subNode);
- return new LimitStage(txn, nodeArgs["num"].numberInt(), workingSet, subNode);
+ return new LimitStage(opCtx, nodeArgs["num"].numberInt(), workingSet, subNode);
} else if ("skip" == nodeName) {
uassert(
16938, "Skip stage doesn't have a filter (put it on the child)", NULL == matcher);
uassert(16932, "Node argument must be provided to skip", nodeArgs["node"].isABSONObj());
uassert(16933, "Num argument must be provided to skip", nodeArgs["num"].isNumber());
PlanStage* subNode =
- parseQuery(txn, collection, nodeArgs["node"].Obj(), workingSet, exprs);
+ parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, exprs);
uassert(28733,
"Can't parse sub-node of SKIP: " + nodeArgs["node"].Obj().toString(),
NULL != subNode);
- return new SkipStage(txn, nodeArgs["num"].numberInt(), workingSet, subNode);
+ return new SkipStage(opCtx, nodeArgs["num"].numberInt(), workingSet, subNode);
} else if ("cscan" == nodeName) {
CollectionScanParams params;
params.collection = collection;
@@ -422,7 +423,7 @@ public:
params.direction = CollectionScanParams::BACKWARD;
}
- return new CollectionScan(txn, params, workingSet, matcher);
+ return new CollectionScan(opCtx, params, workingSet, matcher);
}
// sort is disabled for now.
#if 0
@@ -431,7 +432,7 @@ public:
nodeArgs["node"].isABSONObj());
uassert(16970, "Pattern argument must be provided to sort",
nodeArgs["pattern"].isABSONObj());
- PlanStage* subNode = parseQuery(txn, db, nodeArgs["node"].Obj(), workingSet, exprs);
+ PlanStage* subNode = parseQuery(opCtx, db, nodeArgs["node"].Obj(), workingSet, exprs);
SortStageParams params;
params.pattern = nodeArgs["pattern"].Obj();
return new SortStage(params, workingSet, subNode);
@@ -448,14 +449,14 @@ public:
params.pattern = nodeArgs["pattern"].Obj();
// Dedup is true by default.
- auto mergeStage = make_unique<MergeSortStage>(txn, params, workingSet, collection);
+ auto mergeStage = make_unique<MergeSortStage>(opCtx, params, workingSet, collection);
BSONObjIterator it(nodeArgs["nodes"].Obj());
while (it.more()) {
BSONElement e = it.next();
uassert(16973, "node of mergeSort isn't an obj?: " + e.toString(), e.isABSONObj());
- PlanStage* subNode = parseQuery(txn, collection, e.Obj(), workingSet, exprs);
+ PlanStage* subNode = parseQuery(opCtx, collection, e.Obj(), workingSet, exprs);
uassert(16974,
"Can't parse sub-node of mergeSort: " + e.Obj().toString(),
NULL != subNode);
@@ -467,7 +468,7 @@ public:
string search = nodeArgs["search"].String();
vector<IndexDescriptor*> idxMatches;
- collection->getIndexCatalog()->findIndexByType(txn, "text", idxMatches);
+ collection->getIndexCatalog()->findIndexByType(opCtx, "text", idxMatches);
uassert(17194, "Expected exactly one text index", idxMatches.size() == 1);
IndexDescriptor* index = idxMatches[0];
@@ -494,7 +495,7 @@ public:
return NULL;
}
- return new TextStage(txn, params, workingSet, matcher);
+ return new TextStage(opCtx, params, workingSet, matcher);
} else if ("delete" == nodeName) {
uassert(
18636, "Delete stage doesn't have a filter (put it on the child)", NULL == matcher);
@@ -504,13 +505,13 @@ public:
"isMulti argument must be provided to delete",
nodeArgs["isMulti"].type() == Bool);
PlanStage* subNode =
- parseQuery(txn, collection, nodeArgs["node"].Obj(), workingSet, exprs);
+ parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, exprs);
uassert(28734,
"Can't parse sub-node of DELETE: " + nodeArgs["node"].Obj().toString(),
NULL != subNode);
DeleteStageParams params;
params.isMulti = nodeArgs["isMulti"].Bool();
- return new DeleteStage(txn, params, workingSet, collection, subNode);
+ return new DeleteStage(opCtx, params, workingSet, collection, subNode);
} else {
return NULL;
}
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index 3c7122f3fe1..4fa4f8d7ad0 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -56,12 +56,12 @@ using stdx::make_unique;
const char* SubplanStage::kStageType = "SUBPLAN";
-SubplanStage::SubplanStage(OperationContext* txn,
+SubplanStage::SubplanStage(OperationContext* opCtx,
Collection* collection,
WorkingSet* ws,
const QueryPlannerParams& params,
CanonicalQuery* cq)
- : PlanStage(kStageType, txn),
+ : PlanStage(kStageType, opCtx),
_collection(collection),
_ws(ws),
_plannerParams(params),
diff --git a/src/mongo/db/exec/subplan.h b/src/mongo/db/exec/subplan.h
index a9b356ff4a6..38625a15276 100644
--- a/src/mongo/db/exec/subplan.h
+++ b/src/mongo/db/exec/subplan.h
@@ -67,7 +67,7 @@ class OperationContext;
*/
class SubplanStage final : public PlanStage {
public:
- SubplanStage(OperationContext* txn,
+ SubplanStage(OperationContext* opCtx,
Collection* collection,
WorkingSet* ws,
const QueryPlannerParams& params,
diff --git a/src/mongo/db/exec/text.cpp b/src/mongo/db/exec/text.cpp
index a290b8f3095..f698dbd0dc8 100644
--- a/src/mongo/db/exec/text.cpp
+++ b/src/mongo/db/exec/text.cpp
@@ -55,12 +55,12 @@ using fts::MAX_WEIGHT;
const char* TextStage::kStageType = "TEXT";
-TextStage::TextStage(OperationContext* txn,
+TextStage::TextStage(OperationContext* opCtx,
const TextStageParams& params,
WorkingSet* ws,
const MatchExpression* filter)
- : PlanStage(kStageType, txn), _params(params) {
- _children.emplace_back(buildTextTree(txn, ws, filter));
+ : PlanStage(kStageType, opCtx), _params(params) {
+ _children.emplace_back(buildTextTree(opCtx, ws, filter));
_specificStats.indexPrefix = _params.indexPrefix;
_specificStats.indexName = _params.index->indexName();
_specificStats.parsedTextQuery = _params.query.toBSON();
@@ -92,10 +92,10 @@ const SpecificStats* TextStage::getSpecificStats() const {
return &_specificStats;
}
-unique_ptr<PlanStage> TextStage::buildTextTree(OperationContext* txn,
+unique_ptr<PlanStage> TextStage::buildTextTree(OperationContext* opCtx,
WorkingSet* ws,
const MatchExpression* filter) const {
- auto textScorer = make_unique<TextOrStage>(txn, _params.spec, ws, filter, _params.index);
+ auto textScorer = make_unique<TextOrStage>(opCtx, _params.spec, ws, filter, _params.index);
// Get all the index scans for each term in our query.
for (const auto& term : _params.query.getTermsForBounds()) {
@@ -110,11 +110,11 @@ unique_ptr<PlanStage> TextStage::buildTextTree(OperationContext* txn,
ixparams.descriptor = _params.index;
ixparams.direction = -1;
- textScorer->addChild(make_unique<IndexScan>(txn, ixparams, ws, nullptr));
+ textScorer->addChild(make_unique<IndexScan>(opCtx, ixparams, ws, nullptr));
}
auto matcher =
- make_unique<TextMatchStage>(txn, std::move(textScorer), _params.query, _params.spec, ws);
+ make_unique<TextMatchStage>(opCtx, std::move(textScorer), _params.query, _params.spec, ws);
unique_ptr<PlanStage> treeRoot = std::move(matcher);
return treeRoot;
diff --git a/src/mongo/db/exec/text.h b/src/mongo/db/exec/text.h
index 6335c1cf2d7..d31f42c6e5a 100644
--- a/src/mongo/db/exec/text.h
+++ b/src/mongo/db/exec/text.h
@@ -71,7 +71,7 @@ struct TextStageParams {
*/
class TextStage final : public PlanStage {
public:
- TextStage(OperationContext* txn,
+ TextStage(OperationContext* opCtx,
const TextStageParams& params,
WorkingSet* ws,
const MatchExpression* filter);
@@ -94,7 +94,7 @@ private:
/**
* Helper method to built the query execution plan for the text stage.
*/
- unique_ptr<PlanStage> buildTextTree(OperationContext* txn,
+ unique_ptr<PlanStage> buildTextTree(OperationContext* opCtx,
WorkingSet* ws,
const MatchExpression* filter) const;
diff --git a/src/mongo/db/exec/text_or.cpp b/src/mongo/db/exec/text_or.cpp
index 195af913a99..5666feea046 100644
--- a/src/mongo/db/exec/text_or.cpp
+++ b/src/mongo/db/exec/text_or.cpp
@@ -54,12 +54,12 @@ using fts::FTSSpec;
const char* TextOrStage::kStageType = "TEXT_OR";
-TextOrStage::TextOrStage(OperationContext* txn,
+TextOrStage::TextOrStage(OperationContext* opCtx,
const FTSSpec& ftsSpec,
WorkingSet* ws,
const MatchExpression* filter,
IndexDescriptor* index)
- : PlanStage(kStageType, txn),
+ : PlanStage(kStageType, opCtx),
_ftsSpec(ftsSpec),
_ws(ws),
_scoreIterator(_scores.end()),
@@ -99,7 +99,7 @@ void TextOrStage::doReattachToOperationContext() {
_recordCursor->reattachToOperationContext(getOpCtx());
}
-void TextOrStage::doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+void TextOrStage::doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {
// Remove the RecordID from the ScoreMap.
ScoreMap::iterator scoreIt = _scores.find(dl);
if (scoreIt != _scores.end()) {
@@ -256,13 +256,13 @@ PlanStage::StageState TextOrStage::returnResults(WorkingSetID* out) {
*/
class TextMatchableDocument : public MatchableDocument {
public:
- TextMatchableDocument(OperationContext* txn,
+ TextMatchableDocument(OperationContext* opCtx,
const BSONObj& keyPattern,
const BSONObj& key,
WorkingSet* ws,
WorkingSetID id,
unowned_ptr<SeekableRecordCursor> recordCursor)
- : _txn(txn),
+ : _opCtx(opCtx),
_recordCursor(recordCursor),
_keyPattern(keyPattern),
_key(key),
@@ -308,7 +308,7 @@ public:
private:
BSONObj getObj() const {
- if (!WorkingSetCommon::fetchIfUnfetched(_txn, _ws, _id, _recordCursor))
+ if (!WorkingSetCommon::fetchIfUnfetched(_opCtx, _ws, _id, _recordCursor))
throw DocumentDeletedException();
WorkingSetMember* member = _ws->get(_id);
@@ -318,7 +318,7 @@ private:
return member->obj.value();
}
- OperationContext* _txn;
+ OperationContext* _opCtx;
unowned_ptr<SeekableRecordCursor> _recordCursor;
BSONObj _keyPattern;
BSONObj _key;
diff --git a/src/mongo/db/exec/text_or.h b/src/mongo/db/exec/text_or.h
index f977d11645e..b40c069cc18 100644
--- a/src/mongo/db/exec/text_or.h
+++ b/src/mongo/db/exec/text_or.h
@@ -72,7 +72,7 @@ public:
kDone,
};
- TextOrStage(OperationContext* txn,
+ TextOrStage(OperationContext* opCtx,
const FTSSpec& ftsSpec,
WorkingSet* ws,
const MatchExpression* filter,
@@ -89,7 +89,7 @@ public:
void doRestoreState() final;
void doDetachFromOperationContext() final;
void doReattachToOperationContext() final;
- void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) final;
+ void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
StageType stageType() const final {
return STAGE_TEXT_OR;
diff --git a/src/mongo/db/exec/update.cpp b/src/mongo/db/exec/update.cpp
index a1b43a2e21d..cced1c41fe6 100644
--- a/src/mongo/db/exec/update.cpp
+++ b/src/mongo/db/exec/update.cpp
@@ -435,8 +435,9 @@ bool shouldRestartUpdateIfNoLongerMatches(const UpdateStageParams& params) {
return params.request->shouldReturnAnyDocs() && !params.request->getSort().isEmpty();
};
-const std::vector<FieldRef*>* getImmutableFields(OperationContext* txn, const NamespaceString& ns) {
- auto metadata = CollectionShardingState::get(txn, ns)->getMetadata();
+const std::vector<FieldRef*>* getImmutableFields(OperationContext* opCtx,
+ const NamespaceString& ns) {
+ auto metadata = CollectionShardingState::get(opCtx, ns)->getMetadata();
if (metadata) {
const std::vector<FieldRef*>& fields = metadata->getKeyPatternFields();
// Return shard-keys as immutable for the update system.
@@ -449,12 +450,12 @@ const std::vector<FieldRef*>* getImmutableFields(OperationContext* txn, const Na
const char* UpdateStage::kStageType = "UPDATE";
-UpdateStage::UpdateStage(OperationContext* txn,
+UpdateStage::UpdateStage(OperationContext* opCtx,
const UpdateStageParams& params,
WorkingSet* ws,
Collection* collection,
PlanStage* child)
- : PlanStage(kStageType, txn),
+ : PlanStage(kStageType, opCtx),
_params(params),
_ws(ws),
_collection(collection),
@@ -649,7 +650,7 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, Reco
return newObj;
}
-Status UpdateStage::applyUpdateOpsForInsert(OperationContext* txn,
+Status UpdateStage::applyUpdateOpsForInsert(OperationContext* opCtx,
const CanonicalQuery* cq,
const BSONObj& query,
UpdateDriver* driver,
@@ -667,7 +668,7 @@ Status UpdateStage::applyUpdateOpsForInsert(OperationContext* txn,
const vector<FieldRef*>* immutablePaths = NULL;
if (!isInternalRequest)
- immutablePaths = getImmutableFields(txn, ns);
+ immutablePaths = getImmutableFields(opCtx, ns);
// The original document we compare changes to - immutable paths must not change
BSONObj original;
diff --git a/src/mongo/db/exec/update.h b/src/mongo/db/exec/update.h
index f1703407df1..e88a26e445b 100644
--- a/src/mongo/db/exec/update.h
+++ b/src/mongo/db/exec/update.h
@@ -77,7 +77,7 @@ class UpdateStage final : public PlanStage {
MONGO_DISALLOW_COPYING(UpdateStage);
public:
- UpdateStage(OperationContext* txn,
+ UpdateStage(OperationContext* opCtx,
const UpdateStageParams& params,
WorkingSet* ws,
Collection* collection,
@@ -134,7 +134,7 @@ public:
*
* Returns the document to insert in *out.
*/
- static Status applyUpdateOpsForInsert(OperationContext* txn,
+ static Status applyUpdateOpsForInsert(OperationContext* opCtx,
const CanonicalQuery* cq,
const BSONObj& query,
UpdateDriver* driver,
diff --git a/src/mongo/db/exec/working_set_common.cpp b/src/mongo/db/exec/working_set_common.cpp
index a9035311bba..c4fccc9267c 100644
--- a/src/mongo/db/exec/working_set_common.cpp
+++ b/src/mongo/db/exec/working_set_common.cpp
@@ -41,7 +41,7 @@
namespace mongo {
// static
-bool WorkingSetCommon::fetchAndInvalidateRecordId(OperationContext* txn,
+bool WorkingSetCommon::fetchAndInvalidateRecordId(OperationContext* opCtx,
WorkingSetMember* member,
const Collection* collection) {
// Already in our desired state.
@@ -55,7 +55,7 @@ bool WorkingSetCommon::fetchAndInvalidateRecordId(OperationContext* txn,
}
// Do the fetch, invalidate the DL.
- member->obj = collection->docFor(txn, member->recordId);
+ member->obj = collection->docFor(opCtx, member->recordId);
member->obj.setValue(member->obj.value().getOwned());
member->recordId = RecordId();
member->transitionToOwnedObj();
@@ -86,7 +86,7 @@ void WorkingSetCommon::prepareForSnapshotChange(WorkingSet* workingSet) {
}
// static
-bool WorkingSetCommon::fetch(OperationContext* txn,
+bool WorkingSetCommon::fetch(OperationContext* opCtx,
WorkingSet* workingSet,
WorkingSetID id,
unowned_ptr<SeekableRecordCursor> cursor) {
@@ -105,7 +105,7 @@ bool WorkingSetCommon::fetch(OperationContext* txn,
return false;
}
- member->obj = {txn->recoveryUnit()->getSnapshotId(), record->data.releaseToBson()};
+ member->obj = {opCtx->recoveryUnit()->getSnapshotId(), record->data.releaseToBson()};
if (member->isSuspicious) {
// Make sure that all of the keyData is still valid for this copy of the document.
diff --git a/src/mongo/db/exec/working_set_common.h b/src/mongo/db/exec/working_set_common.h
index 5ed238ce27f..61c0e04af15 100644
--- a/src/mongo/db/exec/working_set_common.h
+++ b/src/mongo/db/exec/working_set_common.h
@@ -45,7 +45,7 @@ public:
* Requires either a valid BSONObj or valid RecordId.
* Returns true if the fetch and invalidate succeeded, false otherwise.
*/
- static bool fetchAndInvalidateRecordId(OperationContext* txn,
+ static bool fetchAndInvalidateRecordId(OperationContext* opCtx,
WorkingSetMember* member,
const Collection* collection);
@@ -70,19 +70,19 @@ public:
*
* WriteConflict exceptions may be thrown. When they are, 'member' will be unmodified.
*/
- static bool fetch(OperationContext* txn,
+ static bool fetch(OperationContext* opCtx,
WorkingSet* workingSet,
WorkingSetID id,
unowned_ptr<SeekableRecordCursor> cursor);
- static bool fetchIfUnfetched(OperationContext* txn,
+ static bool fetchIfUnfetched(OperationContext* opCtx,
WorkingSet* workingSet,
WorkingSetID id,
unowned_ptr<SeekableRecordCursor> cursor) {
WorkingSetMember* member = workingSet->get(id);
if (member->hasObj())
return true;
- return fetch(txn, workingSet, id, cursor);
+ return fetch(opCtx, workingSet, id, cursor);
}
/**
diff --git a/src/mongo/db/exec/write_stage_common.cpp b/src/mongo/db/exec/write_stage_common.cpp
index 52bf1724320..8d48801f2db 100644
--- a/src/mongo/db/exec/write_stage_common.cpp
+++ b/src/mongo/db/exec/write_stage_common.cpp
@@ -40,17 +40,17 @@ namespace mongo {
namespace write_stage_common {
bool ensureStillMatches(const Collection* collection,
- OperationContext* txn,
+ OperationContext* opCtx,
WorkingSet* ws,
WorkingSetID id,
const CanonicalQuery* cq) {
// If the snapshot changed, then we have to make sure we have the latest copy of the doc and
// that it still matches.
WorkingSetMember* member = ws->get(id);
- if (txn->recoveryUnit()->getSnapshotId() != member->obj.snapshotId()) {
- std::unique_ptr<SeekableRecordCursor> cursor(collection->getCursor(txn));
+ if (opCtx->recoveryUnit()->getSnapshotId() != member->obj.snapshotId()) {
+ std::unique_ptr<SeekableRecordCursor> cursor(collection->getCursor(opCtx));
- if (!WorkingSetCommon::fetch(txn, ws, id, cursor)) {
+ if (!WorkingSetCommon::fetch(opCtx, ws, id, cursor)) {
// Doc is already deleted.
return false;
}
diff --git a/src/mongo/db/exec/write_stage_common.h b/src/mongo/db/exec/write_stage_common.h
index 388c7d2c763..19e03276e2c 100644
--- a/src/mongo/db/exec/write_stage_common.h
+++ b/src/mongo/db/exec/write_stage_common.h
@@ -50,7 +50,7 @@ namespace write_stage_common {
* still exists.
*/
bool ensureStillMatches(const Collection* collection,
- OperationContext* txn,
+ OperationContext* opCtx,
WorkingSet* ws,
WorkingSetID id,
const CanonicalQuery* cq);
diff --git a/src/mongo/db/ftdc/collector.cpp b/src/mongo/db/ftdc/collector.cpp
index 611f12dff5a..4441f2ef126 100644
--- a/src/mongo/db/ftdc/collector.cpp
+++ b/src/mongo/db/ftdc/collector.cpp
@@ -65,8 +65,8 @@ std::tuple<BSONObj, Date_t> FTDCCollectorCollection::collect(Client* client) {
// All collectors should be ok seeing the inconsistent states in the middle of replication
// batches. This is desirable because we want to be able to collect data in the middle of
// batches that are taking a long time.
- auto txn = client->makeOperationContext();
- txn->lockState()->setShouldConflictWithSecondaryBatchApplication(false);
+ auto opCtx = client->makeOperationContext();
+ opCtx->lockState()->setShouldConflictWithSecondaryBatchApplication(false);
for (auto& collector : _collectors) {
BSONObjBuilder subObjBuilder(builder.subobjStart(collector->name()));
@@ -84,8 +84,8 @@ std::tuple<BSONObj, Date_t> FTDCCollectorCollection::collect(Client* client) {
subObjBuilder.appendDate(kFTDCCollectStartField, now);
{
- ScopedTransaction st(txn.get(), MODE_IS);
- collector->collect(txn.get(), subObjBuilder);
+ ScopedTransaction st(opCtx.get(), MODE_IS);
+ collector->collect(opCtx.get(), subObjBuilder);
}
end = client->getServiceContext()->getPreciseClockSource()->now();
diff --git a/src/mongo/db/ftdc/collector.h b/src/mongo/db/ftdc/collector.h
index fd9efb199e2..90c81bda747 100644
--- a/src/mongo/db/ftdc/collector.h
+++ b/src/mongo/db/ftdc/collector.h
@@ -66,7 +66,7 @@ public:
* If a collector fails to collect data, it should update builder with the result of the
* failure.
*/
- virtual void collect(OperationContext* txn, BSONObjBuilder& builder) = 0;
+ virtual void collect(OperationContext* opCtx, BSONObjBuilder& builder) = 0;
protected:
FTDCCollectorInterface() = default;
diff --git a/src/mongo/db/ftdc/controller_test.cpp b/src/mongo/db/ftdc/controller_test.cpp
index 365f06580cd..ba545b3ad04 100644
--- a/src/mongo/db/ftdc/controller_test.cpp
+++ b/src/mongo/db/ftdc/controller_test.cpp
@@ -56,7 +56,7 @@ public:
ASSERT_TRUE(_state == State::kStarted);
}
- void collect(OperationContext* txn, BSONObjBuilder& builder) final {
+ void collect(OperationContext* opCtx, BSONObjBuilder& builder) final {
_state = State::kStarted;
++_counter;
diff --git a/src/mongo/db/ftdc/ftdc_commands.cpp b/src/mongo/db/ftdc/ftdc_commands.cpp
index e9205b1b5ab..e50f5d9cf71 100644
--- a/src/mongo/db/ftdc/ftdc_commands.cpp
+++ b/src/mongo/db/ftdc/ftdc_commands.cpp
@@ -88,7 +88,7 @@ public:
return Status::OK();
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& db,
BSONObj& cmdObj,
int options,
@@ -96,7 +96,8 @@ public:
BSONObjBuilder& result) override {
result.append(
- "data", FTDCController::get(txn->getServiceContext())->getMostRecentPeriodicDocument());
+ "data",
+ FTDCController::get(opCtx->getServiceContext())->getMostRecentPeriodicDocument());
return true;
}
diff --git a/src/mongo/db/ftdc/ftdc_mongod.cpp b/src/mongo/db/ftdc/ftdc_mongod.cpp
index 60c1c46c9e4..e80fd8a20b9 100644
--- a/src/mongo/db/ftdc/ftdc_mongod.cpp
+++ b/src/mongo/db/ftdc/ftdc_mongod.cpp
@@ -256,10 +256,10 @@ public:
invariant(_command);
}
- void collect(OperationContext* txn, BSONObjBuilder& builder) override {
+ void collect(OperationContext* opCtx, BSONObjBuilder& builder) override {
std::string errmsg;
- bool ret = _command->run(txn, _ns, _cmdObj, 0, errmsg, builder);
+ bool ret = _command->run(opCtx, _ns, _cmdObj, 0, errmsg, builder);
// Some commands return errmsgs when they return false (collstats)
// Some commands return bson objs when they return false (replGetStatus)
diff --git a/src/mongo/db/ftdc/ftdc_system_stats_linux.cpp b/src/mongo/db/ftdc/ftdc_system_stats_linux.cpp
index 9c93718c171..316afde8170 100644
--- a/src/mongo/db/ftdc/ftdc_system_stats_linux.cpp
+++ b/src/mongo/db/ftdc/ftdc_system_stats_linux.cpp
@@ -77,7 +77,7 @@ public:
}
}
- void collect(OperationContext* txn, BSONObjBuilder& builder) override {
+ void collect(OperationContext* opCtx, BSONObjBuilder& builder) override {
{
BSONObjBuilder subObjBuilder(builder.subobjStart("cpu"_sd));
diff --git a/src/mongo/db/ftdc/ftdc_system_stats_windows.cpp b/src/mongo/db/ftdc/ftdc_system_stats_windows.cpp
index 87f1d8503be..be6086d8743 100644
--- a/src/mongo/db/ftdc/ftdc_system_stats_windows.cpp
+++ b/src/mongo/db/ftdc/ftdc_system_stats_windows.cpp
@@ -101,7 +101,7 @@ public:
WindowsSystemMetricsCollector(std::unique_ptr<PerfCounterCollector> collector)
: _collector(std::move(collector)) {}
- void collect(OperationContext* txn, BSONObjBuilder& builder) override {
+ void collect(OperationContext* opCtx, BSONObjBuilder& builder) override {
processStatusErrors(_collector->collect(&builder), &builder);
}
diff --git a/src/mongo/db/index/haystack_access_method.cpp b/src/mongo/db/index/haystack_access_method.cpp
index 877e94c0c7d..213c9f05baa 100644
--- a/src/mongo/db/index/haystack_access_method.cpp
+++ b/src/mongo/db/index/haystack_access_method.cpp
@@ -68,7 +68,7 @@ void HaystackAccessMethod::doGetKeys(const BSONObj& obj,
ExpressionKeysPrivate::getHaystackKeys(obj, _geoField, _otherFields, _bucketSize, keys);
}
-void HaystackAccessMethod::searchCommand(OperationContext* txn,
+void HaystackAccessMethod::searchCommand(OperationContext* opCtx,
Collection* collection,
const BSONObj& nearObj,
double maxDistance,
@@ -87,7 +87,7 @@ void HaystackAccessMethod::searchCommand(OperationContext* txn,
}
int scale = static_cast<int>(ceil(maxDistance / _bucketSize));
- GeoHaystackSearchHopper hopper(txn, nearObj, maxDistance, limit, _geoField, collection);
+ GeoHaystackSearchHopper hopper(opCtx, nearObj, maxDistance, limit, _geoField, collection);
long long btreeMatches = 0;
@@ -111,7 +111,7 @@ void HaystackAccessMethod::searchCommand(OperationContext* txn,
unique_ptr<PlanExecutor> exec(
- InternalPlanner::indexScan(txn,
+ InternalPlanner::indexScan(opCtx,
collection,
_descriptor,
key,
diff --git a/src/mongo/db/index/haystack_access_method.h b/src/mongo/db/index/haystack_access_method.h
index a6ff68bdd9f..4bf8fc41839 100644
--- a/src/mongo/db/index/haystack_access_method.h
+++ b/src/mongo/db/index/haystack_access_method.h
@@ -60,7 +60,7 @@ public:
protected:
friend class GeoHaystackSearchCommand;
- void searchCommand(OperationContext* txn,
+ void searchCommand(OperationContext* opCtx,
Collection* collection,
const BSONObj& nearObj,
double maxDistance,
diff --git a/src/mongo/db/index/haystack_access_method_internal.h b/src/mongo/db/index/haystack_access_method_internal.h
index 4bc2e7430ad..b1d26b716ee 100644
--- a/src/mongo/db/index/haystack_access_method_internal.h
+++ b/src/mongo/db/index/haystack_access_method_internal.h
@@ -48,13 +48,13 @@ public:
* @param limit The maximum number of results to return
* @param geoField Which field in the provided RecordId has the point to test.
*/
- GeoHaystackSearchHopper(OperationContext* txn,
+ GeoHaystackSearchHopper(OperationContext* opCtx,
const BSONObj& nearObj,
double maxDistance,
unsigned limit,
const std::string& geoField,
const Collection* collection)
- : _txn(txn),
+ : _opCtx(opCtx),
_collection(collection),
_near(nearObj),
_maxDistance(maxDistance),
@@ -66,7 +66,7 @@ public:
void consider(const RecordId& loc) {
if (limitReached())
return;
- Point p(dps::extractElementAtPath(_collection->docFor(_txn, loc).value(), _geoField));
+ Point p(dps::extractElementAtPath(_collection->docFor(_opCtx, loc).value(), _geoField));
if (distance(_near, p) > _maxDistance)
return;
_locs.push_back(loc);
@@ -74,7 +74,7 @@ public:
int appendResultsTo(BSONArrayBuilder* b) {
for (unsigned i = 0; i < _locs.size(); i++)
- b->append(_collection->docFor(_txn, _locs[i]).value());
+ b->append(_collection->docFor(_opCtx, _locs[i]).value());
return _locs.size();
}
@@ -84,7 +84,7 @@ public:
}
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
const Collection* _collection;
Point _near;
diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp
index c787d1429dc..0a087140600 100644
--- a/src/mongo/db/index/index_access_method.cpp
+++ b/src/mongo/db/index/index_access_method.cpp
@@ -112,16 +112,16 @@ IndexAccessMethod::IndexAccessMethod(IndexCatalogEntry* btreeState, SortedDataIn
verify(IndexDescriptor::isIndexVersionSupported(_descriptor->version()));
}
-bool IndexAccessMethod::ignoreKeyTooLong(OperationContext* txn) {
+bool IndexAccessMethod::ignoreKeyTooLong(OperationContext* opCtx) {
// Ignore this error if we cannot write to the collection or if the user requested it
const auto shouldRelaxConstraints =
- repl::ReplicationCoordinator::get(txn)->shouldRelaxIndexConstraints(
- txn, NamespaceString(_btreeState->ns()));
+ repl::ReplicationCoordinator::get(opCtx)->shouldRelaxIndexConstraints(
+ opCtx, NamespaceString(_btreeState->ns()));
return shouldRelaxConstraints || !failIndexKeyTooLong.load();
}
// Find the keys for obj, put them in the tree pointing to loc
-Status IndexAccessMethod::insert(OperationContext* txn,
+Status IndexAccessMethod::insert(OperationContext* opCtx,
const BSONObj& obj,
const RecordId& loc,
const InsertDeleteOptions& options,
@@ -135,7 +135,7 @@ Status IndexAccessMethod::insert(OperationContext* txn,
Status ret = Status::OK();
for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
- Status status = _newInterface->insert(txn, *i, loc, options.dupsAllowed);
+ Status status = _newInterface->insert(opCtx, *i, loc, options.dupsAllowed);
// Everything's OK, carry on.
if (status.isOK()) {
@@ -145,14 +145,14 @@ Status IndexAccessMethod::insert(OperationContext* txn,
// Error cases.
- if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong(txn)) {
+ if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong(opCtx)) {
continue;
}
if (status.code() == ErrorCodes::DuplicateKeyValue) {
// A document might be indexed multiple times during a background index build
// if it moves ahead of the collection scan cursor (e.g. via an update).
- if (!_btreeState->isReady(txn)) {
+ if (!_btreeState->isReady(opCtx)) {
LOG(3) << "key " << *i << " already in index during background indexing (ok)";
continue;
}
@@ -160,7 +160,7 @@ Status IndexAccessMethod::insert(OperationContext* txn,
// Clean up after ourselves.
for (BSONObjSet::const_iterator j = keys.begin(); j != i; ++j) {
- removeOneKey(txn, *j, loc, options.dupsAllowed);
+ removeOneKey(opCtx, *j, loc, options.dupsAllowed);
*numInserted = 0;
}
@@ -168,18 +168,18 @@ Status IndexAccessMethod::insert(OperationContext* txn,
}
if (*numInserted > 1 || isMultikeyFromPaths(multikeyPaths)) {
- _btreeState->setMultikey(txn, multikeyPaths);
+ _btreeState->setMultikey(opCtx, multikeyPaths);
}
return ret;
}
-void IndexAccessMethod::removeOneKey(OperationContext* txn,
+void IndexAccessMethod::removeOneKey(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
bool dupsAllowed) {
try {
- _newInterface->unindex(txn, key, loc, dupsAllowed);
+ _newInterface->unindex(opCtx, key, loc, dupsAllowed);
} catch (AssertionException& e) {
log() << "Assertion failure: _unindex failed " << _descriptor->indexNamespace();
log() << "Assertion failure: _unindex failed: " << redact(e) << " key:" << key.toString()
@@ -188,18 +188,18 @@ void IndexAccessMethod::removeOneKey(OperationContext* txn,
}
}
-std::unique_ptr<SortedDataInterface::Cursor> IndexAccessMethod::newCursor(OperationContext* txn,
+std::unique_ptr<SortedDataInterface::Cursor> IndexAccessMethod::newCursor(OperationContext* opCtx,
bool isForward) const {
- return _newInterface->newCursor(txn, isForward);
+ return _newInterface->newCursor(opCtx, isForward);
}
std::unique_ptr<SortedDataInterface::Cursor> IndexAccessMethod::newRandomCursor(
- OperationContext* txn) const {
- return _newInterface->newRandomCursor(txn);
+ OperationContext* opCtx) const {
+ return _newInterface->newRandomCursor(opCtx);
}
// Remove the provided doc from the index.
-Status IndexAccessMethod::remove(OperationContext* txn,
+Status IndexAccessMethod::remove(OperationContext* opCtx,
const BSONObj& obj,
const RecordId& loc,
const InsertDeleteOptions& options,
@@ -214,25 +214,25 @@ Status IndexAccessMethod::remove(OperationContext* txn,
getKeys(obj, options.getKeysMode, &keys, multikeyPaths);
for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
- removeOneKey(txn, *i, loc, options.dupsAllowed);
+ removeOneKey(opCtx, *i, loc, options.dupsAllowed);
++*numDeleted;
}
return Status::OK();
}
-Status IndexAccessMethod::initializeAsEmpty(OperationContext* txn) {
- return _newInterface->initAsEmpty(txn);
+Status IndexAccessMethod::initializeAsEmpty(OperationContext* opCtx) {
+ return _newInterface->initAsEmpty(opCtx);
}
-Status IndexAccessMethod::touch(OperationContext* txn, const BSONObj& obj) {
+Status IndexAccessMethod::touch(OperationContext* opCtx, const BSONObj& obj) {
BSONObjSet keys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
// There's no need to compute the prefixes of the indexed fields that cause the index to be
// multikey when paging a document's index entries into memory.
MultikeyPaths* multikeyPaths = nullptr;
getKeys(obj, GetKeysMode::kEnforceConstraints, &keys, multikeyPaths);
- std::unique_ptr<SortedDataInterface::Cursor> cursor(_newInterface->newCursor(txn));
+ std::unique_ptr<SortedDataInterface::Cursor> cursor(_newInterface->newCursor(opCtx));
for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
cursor->seekExact(*i);
}
@@ -241,11 +241,11 @@ Status IndexAccessMethod::touch(OperationContext* txn, const BSONObj& obj) {
}
-Status IndexAccessMethod::touch(OperationContext* txn) const {
- return _newInterface->touch(txn);
+Status IndexAccessMethod::touch(OperationContext* opCtx) const {
+ return _newInterface->touch(opCtx);
}
-RecordId IndexAccessMethod::findSingle(OperationContext* txn, const BSONObj& requestedKey) const {
+RecordId IndexAccessMethod::findSingle(OperationContext* opCtx, const BSONObj& requestedKey) const {
// Generate the key for this index.
BSONObj actualKey;
if (_btreeState->getCollator()) {
@@ -259,7 +259,7 @@ RecordId IndexAccessMethod::findSingle(OperationContext* txn, const BSONObj& req
actualKey = requestedKey;
}
- std::unique_ptr<SortedDataInterface::Cursor> cursor(_newInterface->newCursor(txn));
+ std::unique_ptr<SortedDataInterface::Cursor> cursor(_newInterface->newCursor(opCtx));
const auto requestedInfo = kDebugBuild ? SortedDataInterface::Cursor::kKeyAndLoc
: SortedDataInterface::Cursor::kWantLoc;
if (auto kv = cursor->seekExact(actualKey, requestedInfo)) {
@@ -274,23 +274,23 @@ RecordId IndexAccessMethod::findSingle(OperationContext* txn, const BSONObj& req
return RecordId();
}
-Status IndexAccessMethod::validate(OperationContext* txn,
+Status IndexAccessMethod::validate(OperationContext* opCtx,
int64_t* numKeys,
ValidateResults* fullResults) {
long long keys = 0;
- _newInterface->fullValidate(txn, &keys, fullResults);
+ _newInterface->fullValidate(opCtx, &keys, fullResults);
*numKeys = keys;
return Status::OK();
}
-bool IndexAccessMethod::appendCustomStats(OperationContext* txn,
+bool IndexAccessMethod::appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* output,
double scale) const {
- return _newInterface->appendCustomStats(txn, output, scale);
+ return _newInterface->appendCustomStats(opCtx, output, scale);
}
-long long IndexAccessMethod::getSpaceUsedBytes(OperationContext* txn) const {
- return _newInterface->getSpaceUsedBytes(txn);
+long long IndexAccessMethod::getSpaceUsedBytes(OperationContext* opCtx) const {
+ return _newInterface->getSpaceUsedBytes(opCtx);
}
pair<vector<BSONObj>, vector<BSONObj>> IndexAccessMethod::setDifference(const BSONObjSet& left,
@@ -329,7 +329,7 @@ pair<vector<BSONObj>, vector<BSONObj>> IndexAccessMethod::setDifference(const BS
return {std::move(onlyLeft), std::move(onlyRight)};
}
-Status IndexAccessMethod::validateUpdate(OperationContext* txn,
+Status IndexAccessMethod::validateUpdate(OperationContext* opCtx,
const BSONObj& from,
const BSONObj& to,
const RecordId& record,
@@ -358,7 +358,7 @@ Status IndexAccessMethod::validateUpdate(OperationContext* txn,
return Status::OK();
}
-Status IndexAccessMethod::update(OperationContext* txn,
+Status IndexAccessMethod::update(OperationContext* opCtx,
const UpdateTicket& ticket,
int64_t* numInserted,
int64_t* numDeleted) {
@@ -374,17 +374,18 @@ Status IndexAccessMethod::update(OperationContext* txn,
if (ticket.oldKeys.size() + ticket.added.size() - ticket.removed.size() > 1 ||
isMultikeyFromPaths(ticket.newMultikeyPaths)) {
- _btreeState->setMultikey(txn, ticket.newMultikeyPaths);
+ _btreeState->setMultikey(opCtx, ticket.newMultikeyPaths);
}
for (size_t i = 0; i < ticket.removed.size(); ++i) {
- _newInterface->unindex(txn, ticket.removed[i], ticket.loc, ticket.dupsAllowed);
+ _newInterface->unindex(opCtx, ticket.removed[i], ticket.loc, ticket.dupsAllowed);
}
for (size_t i = 0; i < ticket.added.size(); ++i) {
- Status status = _newInterface->insert(txn, ticket.added[i], ticket.loc, ticket.dupsAllowed);
+ Status status =
+ _newInterface->insert(opCtx, ticket.added[i], ticket.loc, ticket.dupsAllowed);
if (!status.isOK()) {
- if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong(txn)) {
+ if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong(opCtx)) {
// Ignore.
continue;
}
@@ -399,8 +400,8 @@ Status IndexAccessMethod::update(OperationContext* txn,
return Status::OK();
}
-Status IndexAccessMethod::compact(OperationContext* txn) {
- return this->_newInterface->compact(txn);
+Status IndexAccessMethod::compact(OperationContext* opCtx) {
+ return this->_newInterface->compact(opCtx);
}
std::unique_ptr<IndexAccessMethod::BulkBuilder> IndexAccessMethod::initiateBulk(
@@ -419,7 +420,7 @@ IndexAccessMethod::BulkBuilder::BulkBuilder(const IndexAccessMethod* index,
BtreeExternalSortComparison(descriptor->keyPattern(), descriptor->version()))),
_real(index) {}
-Status IndexAccessMethod::BulkBuilder::insert(OperationContext* txn,
+Status IndexAccessMethod::BulkBuilder::insert(OperationContext* opCtx,
const BSONObj& obj,
const RecordId& loc,
const InsertDeleteOptions& options,
@@ -455,7 +456,7 @@ Status IndexAccessMethod::BulkBuilder::insert(OperationContext* txn,
}
-Status IndexAccessMethod::commitBulk(OperationContext* txn,
+Status IndexAccessMethod::commitBulk(OperationContext* opCtx,
std::unique_ptr<BulkBuilder> bulk,
bool mayInterrupt,
bool dupsAllowed,
@@ -464,38 +465,38 @@ Status IndexAccessMethod::commitBulk(OperationContext* txn,
std::unique_ptr<BulkBuilder::Sorter::Iterator> i(bulk->_sorter->done());
- stdx::unique_lock<Client> lk(*txn->getClient());
- ProgressMeterHolder pm(*txn->setMessage_inlock("Index Bulk Build: (2/3) btree bottom up",
- "Index: (2/3) BTree Bottom Up Progress",
- bulk->_keysInserted,
- 10));
+ stdx::unique_lock<Client> lk(*opCtx->getClient());
+ ProgressMeterHolder pm(*opCtx->setMessage_inlock("Index Bulk Build: (2/3) btree bottom up",
+ "Index: (2/3) BTree Bottom Up Progress",
+ bulk->_keysInserted,
+ 10));
lk.unlock();
std::unique_ptr<SortedDataBuilderInterface> builder;
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
if (bulk->_everGeneratedMultipleKeys || isMultikeyFromPaths(bulk->_indexMultikeyPaths)) {
- _btreeState->setMultikey(txn, bulk->_indexMultikeyPaths);
+ _btreeState->setMultikey(opCtx, bulk->_indexMultikeyPaths);
}
- builder.reset(_newInterface->getBulkBuilder(txn, dupsAllowed));
+ builder.reset(_newInterface->getBulkBuilder(opCtx, dupsAllowed));
wunit.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "setting index multikey flag", "");
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "setting index multikey flag", "");
while (i->more()) {
if (mayInterrupt) {
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
}
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
// Improve performance in the btree-building phase by disabling rollback tracking.
// This avoids copying all the written bytes to a buffer that is only used to roll back.
// Note that this is safe to do, as this entire index-build-in-progress will be cleaned
// up by the index system.
- txn->recoveryUnit()->setRollbackWritesDisabled();
+ opCtx->recoveryUnit()->setRollbackWritesDisabled();
// Get the next datum and add it to the builder.
BulkBuilder::Sorter::Data d = i->next();
@@ -503,7 +504,7 @@ Status IndexAccessMethod::commitBulk(OperationContext* txn,
if (!status.isOK()) {
// Overlong key that's OK to skip?
- if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong(txn)) {
+ if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong(opCtx)) {
continue;
}
@@ -529,9 +530,9 @@ Status IndexAccessMethod::commitBulk(OperationContext* txn,
pm.finished();
{
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setMessage_inlock("Index Bulk Build: (3/3) btree-middle",
- "Index: (3/3) BTree Middle Progress");
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setMessage_inlock("Index Bulk Build: (3/3) btree-middle",
+ "Index: (3/3) BTree Middle Progress");
}
LOG(timer.seconds() > 10 ? 0 : 1) << "\t done building bottom layer, going to commit";
diff --git a/src/mongo/db/index/index_access_method.h b/src/mongo/db/index/index_access_method.h
index b96260b11f8..9a41bf1961a 100644
--- a/src/mongo/db/index/index_access_method.h
+++ b/src/mongo/db/index/index_access_method.h
@@ -78,7 +78,7 @@ public:
*
* The behavior of the insertion can be specified through 'options'.
*/
- Status insert(OperationContext* txn,
+ Status insert(OperationContext* opCtx,
const BSONObj& obj,
const RecordId& loc,
const InsertDeleteOptions& options,
@@ -88,7 +88,7 @@ public:
* Analogous to above, but remove the records instead of inserting them.
* 'numDeleted' will be set to the number of keys removed from the index for the document.
*/
- Status remove(OperationContext* txn,
+ Status remove(OperationContext* opCtx,
const BSONObj& obj,
const RecordId& loc,
const InsertDeleteOptions& options,
@@ -104,7 +104,7 @@ public:
*
* There is no obligation to perform the update after performing validation.
*/
- Status validateUpdate(OperationContext* txn,
+ Status validateUpdate(OperationContext* opCtx,
const BSONObj& from,
const BSONObj& to,
const RecordId& loc,
@@ -123,7 +123,7 @@ public:
* 'numInserted' will be set to the number of keys inserted into the index for the document.
* 'numDeleted' will be set to the number of keys removed from the index for the document.
*/
- Status update(OperationContext* txn,
+ Status update(OperationContext* opCtx,
const UpdateTicket& ticket,
int64_t* numInserted,
int64_t* numDeleted);
@@ -131,12 +131,12 @@ public:
/**
* Returns an unpositioned cursor over 'this' index.
*/
- std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* txn,
+ std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* opCtx,
bool isForward = true) const;
/**
* Returns a pseudo-random cursor over 'this' index.
*/
- std::unique_ptr<SortedDataInterface::Cursor> newRandomCursor(OperationContext* txn) const;
+ std::unique_ptr<SortedDataInterface::Cursor> newRandomCursor(OperationContext* opCtx) const;
// ------ index level operations ------
@@ -146,7 +146,7 @@ public:
* only called once for the lifetime of the index
* if called multiple times, is an error
*/
- Status initializeAsEmpty(OperationContext* txn);
+ Status initializeAsEmpty(OperationContext* opCtx);
/**
* Try to page-in the pages that contain the keys generated from 'obj'.
@@ -154,12 +154,12 @@ public:
* appropriate pages are not swapped out.
* See prefetch.cpp.
*/
- Status touch(OperationContext* txn, const BSONObj& obj);
+ Status touch(OperationContext* opCtx, const BSONObj& obj);
/**
* this pages in the entire index
*/
- Status touch(OperationContext* txn) const;
+ Status touch(OperationContext* opCtx) const;
/**
* Walk the entire index, checking the internal structure for consistency.
@@ -167,7 +167,7 @@ public:
* Return OK if the index is valid.
*/
- Status validate(OperationContext* txn, int64_t* numKeys, ValidateResults* fullResults);
+ Status validate(OperationContext* opCtx, int64_t* numKeys, ValidateResults* fullResults);
/**
* Add custom statistics about this index to BSON object builder, for display.
@@ -176,21 +176,21 @@ public:
*
* Returns true if stats were appended.
*/
- bool appendCustomStats(OperationContext* txn, BSONObjBuilder* result, double scale) const;
+ bool appendCustomStats(OperationContext* opCtx, BSONObjBuilder* result, double scale) const;
/**
* @return The number of bytes consumed by this index.
* Exactly what is counted is not defined based on padding, re-use, etc...
*/
- long long getSpaceUsedBytes(OperationContext* txn) const;
+ long long getSpaceUsedBytes(OperationContext* opCtx) const;
- RecordId findSingle(OperationContext* txn, const BSONObj& key) const;
+ RecordId findSingle(OperationContext* opCtx, const BSONObj& key) const;
/**
* Attempt compaction to regain disk space if the indexed record store supports
* compaction-in-place.
*/
- Status compact(OperationContext* txn);
+ Status compact(OperationContext* opCtx);
//
// Bulk operations support
@@ -201,7 +201,7 @@ public:
/**
* Insert into the BulkBuilder as-if inserting into an IndexAccessMethod.
*/
- Status insert(OperationContext* txn,
+ Status insert(OperationContext* opCtx,
const BSONObj& obj,
const RecordId& loc,
const InsertDeleteOptions& options,
@@ -250,7 +250,7 @@ public:
* @param dups - if NULL, error out on dups if not allowed
* if not NULL, put the bad RecordIds there
*/
- Status commitBulk(OperationContext* txn,
+ Status commitBulk(OperationContext* opCtx,
std::unique_ptr<BulkBuilder> bulk,
bool mayInterrupt,
bool dupsAllowed,
@@ -306,13 +306,13 @@ protected:
/**
* Determines whether it's OK to ignore ErrorCodes::KeyTooLong for this OperationContext
*/
- bool ignoreKeyTooLong(OperationContext* txn);
+ bool ignoreKeyTooLong(OperationContext* opCtx);
IndexCatalogEntry* _btreeState; // owned by IndexCatalogEntry
const IndexDescriptor* _descriptor;
private:
- void removeOneKey(OperationContext* txn,
+ void removeOneKey(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
bool dupsAllowed);
diff --git a/src/mongo/db/index_builder.cpp b/src/mongo/db/index_builder.cpp
index d749a330184..6cbfd6e4ae6 100644
--- a/src/mongo/db/index_builder.cpp
+++ b/src/mongo/db/index_builder.cpp
@@ -82,33 +82,33 @@ void IndexBuilder::run() {
Client::initThread(name().c_str());
LOG(2) << "IndexBuilder building index " << _index;
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- txn.lockState()->setShouldConflictWithSecondaryBatchApplication(false);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ opCtx.lockState()->setShouldConflictWithSecondaryBatchApplication(false);
- AuthorizationSession::get(txn.getClient())->grantInternalAuthorization();
+ AuthorizationSession::get(opCtx.getClient())->grantInternalAuthorization();
{
- stdx::lock_guard<Client> lk(*txn.getClient());
- CurOp::get(txn)->setNetworkOp_inlock(dbInsert);
+ stdx::lock_guard<Client> lk(*opCtx.getClient());
+ CurOp::get(opCtx)->setNetworkOp_inlock(dbInsert);
}
NamespaceString ns(_index["ns"].String());
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock dlk(txn.lockState(), ns.db(), MODE_X);
- OldClientContext ctx(&txn, ns.getSystemIndexesCollection());
+ ScopedTransaction transaction(&opCtx, MODE_IX);
+ Lock::DBLock dlk(opCtx.lockState(), ns.db(), MODE_X);
+ OldClientContext ctx(&opCtx, ns.getSystemIndexesCollection());
- Database* db = dbHolder().get(&txn, ns.db().toString());
+ Database* db = dbHolder().get(&opCtx, ns.db().toString());
- Status status = _build(&txn, db, true, &dlk);
+ Status status = _build(&opCtx, db, true, &dlk);
if (!status.isOK()) {
error() << "IndexBuilder could not build index: " << redact(status);
fassert(28555, ErrorCodes::isInterruption(status.code()));
}
}
-Status IndexBuilder::buildInForeground(OperationContext* txn, Database* db) const {
- return _build(txn, db, false, NULL);
+Status IndexBuilder::buildInForeground(OperationContext* opCtx, Database* db) const {
+ return _build(opCtx, db, false, NULL);
}
void IndexBuilder::waitForBgIndexStarting() {
@@ -120,7 +120,7 @@ void IndexBuilder::waitForBgIndexStarting() {
_bgIndexStarting = false;
}
-Status IndexBuilder::_build(OperationContext* txn,
+Status IndexBuilder::_build(OperationContext* opCtx,
Database* db,
bool allowBackgroundBuilding,
Lock::DBLock* dbLock) const {
@@ -130,31 +130,31 @@ Status IndexBuilder::_build(OperationContext* txn,
if (!c) {
while (true) {
try {
- WriteUnitOfWork wunit(txn);
- c = db->getOrCreateCollection(txn, ns.ns());
+ WriteUnitOfWork wunit(opCtx);
+ c = db->getOrCreateCollection(opCtx, ns.ns());
verify(c);
wunit.commit();
break;
} catch (const WriteConflictException& wce) {
LOG(2) << "WriteConflictException while creating collection in IndexBuilder"
<< ", retrying.";
- txn->recoveryUnit()->abandonSnapshot();
+ opCtx->recoveryUnit()->abandonSnapshot();
continue;
}
}
}
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
// Show which index we're building in the curop display.
- CurOp::get(txn)->setQuery_inlock(_index);
+ CurOp::get(opCtx)->setQuery_inlock(_index);
}
bool haveSetBgIndexStarting = false;
while (true) {
Status status = Status::OK();
try {
- MultiIndexBlock indexer(txn, c);
+ MultiIndexBlock indexer(opCtx, c);
indexer.allowInterruption();
if (allowBackgroundBuilding)
@@ -183,7 +183,7 @@ Status IndexBuilder::_build(OperationContext* txn,
dbLock->relockWithMode(MODE_IX);
}
- Lock::CollectionLock colLock(txn->lockState(), ns.ns(), MODE_IX);
+ Lock::CollectionLock colLock(opCtx->lockState(), ns.ns(), MODE_IX);
status = indexer.insertAllDocumentsInCollection();
}
@@ -191,7 +191,7 @@ Status IndexBuilder::_build(OperationContext* txn,
if (allowBackgroundBuilding) {
dbLock->relockWithMode(MODE_X);
}
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
indexer.commit();
wunit.commit();
}
@@ -204,7 +204,7 @@ Status IndexBuilder::_build(OperationContext* txn,
if (allowBackgroundBuilding) {
dbLock->relockWithMode(MODE_X);
- Database* reloadDb = dbHolder().get(txn, ns.db());
+ Database* reloadDb = dbHolder().get(opCtx, ns.db());
fassert(28553, reloadDb);
fassert(28554, reloadDb->getCollection(ns.ns()));
}
@@ -223,7 +223,7 @@ Status IndexBuilder::_build(OperationContext* txn,
LOG(2) << "WriteConflictException while creating index in IndexBuilder, retrying.";
- txn->recoveryUnit()->abandonSnapshot();
+ opCtx->recoveryUnit()->abandonSnapshot();
}
}
}
diff --git a/src/mongo/db/index_builder.h b/src/mongo/db/index_builder.h
index bdca706c108..926fd32a5cc 100644
--- a/src/mongo/db/index_builder.h
+++ b/src/mongo/db/index_builder.h
@@ -74,7 +74,7 @@ public:
*/
virtual std::string name() const;
- Status buildInForeground(OperationContext* txn, Database* db) const;
+ Status buildInForeground(OperationContext* opCtx, Database* db) const;
/**
* Waits for a background index build to register itself. This function must be called
@@ -84,7 +84,7 @@ public:
static void waitForBgIndexStarting();
private:
- Status _build(OperationContext* txn,
+ Status _build(OperationContext* opCtx,
Database* db,
bool allowBackgroundBuilding,
Lock::DBLock* dbLock) const;
diff --git a/src/mongo/db/index_legacy.cpp b/src/mongo/db/index_legacy.cpp
index f477b562927..3cdf16f0255 100644
--- a/src/mongo/db/index_legacy.cpp
+++ b/src/mongo/db/index_legacy.cpp
@@ -55,13 +55,13 @@ StatusWith<BSONObj> IndexLegacy::adjustIndexSpecObject(const BSONObj& obj) {
}
// static
-BSONObj IndexLegacy::getMissingField(OperationContext* txn,
+BSONObj IndexLegacy::getMissingField(OperationContext* opCtx,
Collection* collection,
const BSONObj& infoObj) {
BSONObj keyPattern = infoObj.getObjectField("key");
std::string accessMethodName;
if (collection)
- accessMethodName = collection->getIndexCatalog()->getAccessMethodName(txn, keyPattern);
+ accessMethodName = collection->getIndexCatalog()->getAccessMethodName(opCtx, keyPattern);
else
accessMethodName = IndexNames::findPluginName(keyPattern);
diff --git a/src/mongo/db/index_legacy.h b/src/mongo/db/index_legacy.h
index e1408a16dd3..e05ed711fe5 100644
--- a/src/mongo/db/index_legacy.h
+++ b/src/mongo/db/index_legacy.h
@@ -68,7 +68,7 @@ public:
*
* This is a significant leak of index functionality out of the index layer.
*/
- static BSONObj getMissingField(OperationContext* txn,
+ static BSONObj getMissingField(OperationContext* opCtx,
Collection* collection,
const BSONObj& infoObj);
};
diff --git a/src/mongo/db/index_rebuilder.cpp b/src/mongo/db/index_rebuilder.cpp
index c42cbc1dcee..17db2f26b64 100644
--- a/src/mongo/db/index_rebuilder.cpp
+++ b/src/mongo/db/index_rebuilder.cpp
@@ -55,7 +55,7 @@ using std::string;
using std::vector;
namespace {
-void checkNS(OperationContext* txn, const std::list<std::string>& nsToCheck) {
+void checkNS(OperationContext* opCtx, const std::list<std::string>& nsToCheck) {
bool firstTime = true;
for (std::list<std::string>::const_iterator it = nsToCheck.begin(); it != nsToCheck.end();
++it) {
@@ -65,9 +65,9 @@ void checkNS(OperationContext* txn, const std::list<std::string>& nsToCheck) {
// This write lock is held throughout the index building process
// for this namespace.
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), MODE_X);
- OldClientContext ctx(txn, ns);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock lk(opCtx->lockState(), nsToDatabaseSubstring(ns), MODE_X);
+ OldClientContext ctx(opCtx, ns);
Collection* collection = ctx.db()->getCollection(ns);
if (collection == NULL)
@@ -75,18 +75,18 @@ void checkNS(OperationContext* txn, const std::list<std::string>& nsToCheck) {
IndexCatalog* indexCatalog = collection->getIndexCatalog();
- if (collection->ns().isOplog() && indexCatalog->numIndexesTotal(txn) > 0) {
+ if (collection->ns().isOplog() && indexCatalog->numIndexesTotal(opCtx) > 0) {
warning() << ns << " had illegal indexes, removing";
- indexCatalog->dropAllIndexes(txn, true);
+ indexCatalog->dropAllIndexes(opCtx, true);
continue;
}
- MultiIndexBlock indexer(txn, collection);
+ MultiIndexBlock indexer(opCtx, collection);
{
- WriteUnitOfWork wunit(txn);
- vector<BSONObj> indexesToBuild = indexCatalog->getAndClearUnfinishedIndexes(txn);
+ WriteUnitOfWork wunit(opCtx);
+ vector<BSONObj> indexesToBuild = indexCatalog->getAndClearUnfinishedIndexes(opCtx);
// The indexes have now been removed from system.indexes, so the only record is
// in-memory. If there is a journal commit between now and when insert() rewrites
@@ -122,7 +122,7 @@ void checkNS(OperationContext* txn, const std::list<std::string>& nsToCheck) {
try {
uassertStatusOK(indexer.insertAllDocumentsInCollection());
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
indexer.commit();
wunit.commit();
} catch (const DBException& e) {
@@ -142,8 +142,8 @@ void checkNS(OperationContext* txn, const std::list<std::string>& nsToCheck) {
}
} // namespace
-void restartInProgressIndexesFromLastShutdown(OperationContext* txn) {
- AuthorizationSession::get(txn->getClient())->grantInternalAuthorization();
+void restartInProgressIndexesFromLastShutdown(OperationContext* opCtx) {
+ AuthorizationSession::get(opCtx->getClient())->grantInternalAuthorization();
std::vector<std::string> dbNames;
@@ -155,13 +155,13 @@ void restartInProgressIndexesFromLastShutdown(OperationContext* txn) {
for (std::vector<std::string>::const_iterator dbName = dbNames.begin();
dbName < dbNames.end();
++dbName) {
- ScopedTransaction scopedXact(txn, MODE_IS);
- AutoGetDb autoDb(txn, *dbName, MODE_S);
+ ScopedTransaction scopedXact(opCtx, MODE_IS);
+ AutoGetDb autoDb(opCtx, *dbName, MODE_S);
Database* db = autoDb.getDb();
db->getDatabaseCatalogEntry()->getCollectionNamespaces(&collNames);
}
- checkNS(txn, collNames);
+ checkNS(opCtx, collNames);
} catch (const DBException& e) {
error() << "Index verification did not complete: " << redact(e);
fassertFailedNoTrace(18643);
diff --git a/src/mongo/db/index_rebuilder.h b/src/mongo/db/index_rebuilder.h
index bf01367e783..06017cced90 100644
--- a/src/mongo/db/index_rebuilder.h
+++ b/src/mongo/db/index_rebuilder.h
@@ -36,5 +36,5 @@ class OperationContext;
* Restarts building indexes that were in progress during shutdown.
* Only call this at startup before taking requests.
*/
-void restartInProgressIndexesFromLastShutdown(OperationContext* txn);
+void restartInProgressIndexesFromLastShutdown(OperationContext* opCtx);
}
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index cd802146305..243c23087e1 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -82,7 +82,7 @@ void _appendUserInfo(const CurOp& c, BSONObjBuilder& builder, AuthorizationSessi
} // namespace
-void profile(OperationContext* txn, NetworkOp op) {
+void profile(OperationContext* opCtx, NetworkOp op) {
// Initialize with 1kb at start in order to avoid realloc later
BufBuilder profileBufBuilder(1024);
@@ -90,15 +90,15 @@ void profile(OperationContext* txn, NetworkOp op) {
{
Locker::LockerInfo lockerInfo;
- txn->lockState()->getLockerInfo(&lockerInfo);
- CurOp::get(txn)->debug().append(*CurOp::get(txn), lockerInfo.stats, b);
+ opCtx->lockState()->getLockerInfo(&lockerInfo);
+ CurOp::get(opCtx)->debug().append(*CurOp::get(opCtx), lockerInfo.stats, b);
}
b.appendDate("ts", jsTime());
- b.append("client", txn->getClient()->clientAddress());
+ b.append("client", opCtx->getClient()->clientAddress());
const auto& clientMetadata =
- ClientMetadataIsMasterState::get(txn->getClient()).getClientMetadata();
+ ClientMetadataIsMasterState::get(opCtx->getClient()).getClientMetadata();
if (clientMetadata) {
auto appName = clientMetadata.get().getApplicationName();
if (!appName.empty()) {
@@ -106,50 +106,50 @@ void profile(OperationContext* txn, NetworkOp op) {
}
}
- AuthorizationSession* authSession = AuthorizationSession::get(txn->getClient());
- _appendUserInfo(*CurOp::get(txn), b, authSession);
+ AuthorizationSession* authSession = AuthorizationSession::get(opCtx->getClient());
+ _appendUserInfo(*CurOp::get(opCtx), b, authSession);
const BSONObj p = b.done();
- const bool wasLocked = txn->lockState()->isLocked();
+ const bool wasLocked = opCtx->lockState()->isLocked();
- const string dbName(nsToDatabase(CurOp::get(txn)->getNS()));
+ const string dbName(nsToDatabase(CurOp::get(opCtx)->getNS()));
try {
bool acquireDbXLock = false;
while (true) {
- ScopedTransaction scopedXact(txn, MODE_IX);
+ ScopedTransaction scopedXact(opCtx, MODE_IX);
std::unique_ptr<AutoGetDb> autoGetDb;
if (acquireDbXLock) {
- autoGetDb.reset(new AutoGetDb(txn, dbName, MODE_X));
+ autoGetDb.reset(new AutoGetDb(opCtx, dbName, MODE_X));
if (autoGetDb->getDb()) {
- createProfileCollection(txn, autoGetDb->getDb());
+ createProfileCollection(opCtx, autoGetDb->getDb());
}
} else {
- autoGetDb.reset(new AutoGetDb(txn, dbName, MODE_IX));
+ autoGetDb.reset(new AutoGetDb(opCtx, dbName, MODE_IX));
}
Database* const db = autoGetDb->getDb();
if (!db) {
// Database disappeared
log() << "note: not profiling because db went away for "
- << CurOp::get(txn)->getNS();
+ << CurOp::get(opCtx)->getNS();
break;
}
- Lock::CollectionLock collLock(txn->lockState(), db->getProfilingNS(), MODE_IX);
+ Lock::CollectionLock collLock(opCtx->lockState(), db->getProfilingNS(), MODE_IX);
Collection* const coll = db->getCollection(db->getProfilingNS());
if (coll) {
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
OpDebug* const nullOpDebug = nullptr;
- coll->insertDocument(txn, p, nullOpDebug, false);
+ coll->insertDocument(opCtx, p, nullOpDebug, false);
wuow.commit();
break;
} else if (!acquireDbXLock &&
- (!wasLocked || txn->lockState()->isDbLockedForMode(dbName, MODE_X))) {
+ (!wasLocked || opCtx->lockState()->isDbLockedForMode(dbName, MODE_X))) {
// Try to create the collection only if we are not under lock, in order to
// avoid deadlocks due to lock conversion. This would only be hit if someone
// deletes the profiler collection after setting profile level.
@@ -161,13 +161,13 @@ void profile(OperationContext* txn, NetworkOp op) {
}
} catch (const AssertionException& assertionEx) {
warning() << "Caught Assertion while trying to profile " << networkOpToString(op)
- << " against " << CurOp::get(txn)->getNS() << ": " << redact(assertionEx);
+ << " against " << CurOp::get(opCtx)->getNS() << ": " << redact(assertionEx);
}
}
-Status createProfileCollection(OperationContext* txn, Database* db) {
- invariant(txn->lockState()->isDbLockedForMode(db->name(), MODE_X));
+Status createProfileCollection(OperationContext* opCtx, Database* db) {
+ invariant(opCtx->lockState()->isDbLockedForMode(db->name(), MODE_X));
const std::string dbProfilingNS(db->getProfilingNS());
@@ -188,11 +188,11 @@ Status createProfileCollection(OperationContext* txn, Database* db) {
collectionOptions.capped = true;
collectionOptions.cappedSize = 1024 * 1024;
- WriteUnitOfWork wunit(txn);
- bool shouldReplicateWrites = txn->writesAreReplicated();
- txn->setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, txn, shouldReplicateWrites);
- invariant(db->createCollection(txn, dbProfilingNS, collectionOptions));
+ WriteUnitOfWork wunit(opCtx);
+ bool shouldReplicateWrites = opCtx->writesAreReplicated();
+ opCtx->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, opCtx, shouldReplicateWrites);
+ invariant(db->createCollection(opCtx, dbProfilingNS, collectionOptions));
wunit.commit();
return Status::OK();
diff --git a/src/mongo/db/introspect.h b/src/mongo/db/introspect.h
index 4f44a1623a1..bf2214cde34 100644
--- a/src/mongo/db/introspect.h
+++ b/src/mongo/db/introspect.h
@@ -39,11 +39,11 @@ class OperationContext;
/**
* Invoked when database profile is enabled.
*/
-void profile(OperationContext* txn, NetworkOp op);
+void profile(OperationContext* opCtx, NetworkOp op);
/**
* Pre-creates the profile collection for the specified database.
*/
-Status createProfileCollection(OperationContext* txn, Database* db);
+Status createProfileCollection(OperationContext* opCtx, Database* db);
} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_text.cpp b/src/mongo/db/matcher/expression_text.cpp
index 8c8bd979cf8..9dcf093e591 100644
--- a/src/mongo/db/matcher/expression_text.cpp
+++ b/src/mongo/db/matcher/expression_text.cpp
@@ -41,7 +41,7 @@
namespace mongo {
-Status TextMatchExpression::init(OperationContext* txn,
+Status TextMatchExpression::init(OperationContext* opCtx,
const NamespaceString& nss,
TextParams params) {
_ftsQuery.setQuery(std::move(params.query));
@@ -52,9 +52,9 @@ Status TextMatchExpression::init(OperationContext* txn,
fts::TextIndexVersion version;
{
// Find text index.
- ScopedTransaction transaction(txn, MODE_IS);
- AutoGetDb autoDb(txn, nss.db(), MODE_IS);
- Lock::CollectionLock collLock(txn->lockState(), nss.ns(), MODE_IS);
+ ScopedTransaction transaction(opCtx, MODE_IS);
+ AutoGetDb autoDb(opCtx, nss.db(), MODE_IS);
+ Lock::CollectionLock collLock(opCtx->lockState(), nss.ns(), MODE_IS);
Database* db = autoDb.getDb();
if (!db) {
return {ErrorCodes::IndexNotFound,
@@ -70,7 +70,7 @@ Status TextMatchExpression::init(OperationContext* txn,
<< "')"};
}
std::vector<IndexDescriptor*> idxMatches;
- collection->getIndexCatalog()->findIndexByType(txn, IndexNames::TEXT, idxMatches);
+ collection->getIndexCatalog()->findIndexByType(opCtx, IndexNames::TEXT, idxMatches);
if (idxMatches.empty()) {
return {ErrorCodes::IndexNotFound, "text index required for $text query"};
}
diff --git a/src/mongo/db/matcher/expression_text.h b/src/mongo/db/matcher/expression_text.h
index 3bd44b29da3..bb5dbfb77b1 100644
--- a/src/mongo/db/matcher/expression_text.h
+++ b/src/mongo/db/matcher/expression_text.h
@@ -41,7 +41,7 @@ class OperationContext;
class TextMatchExpression : public TextMatchExpressionBase {
public:
- Status init(OperationContext* txn, const NamespaceString& nss, TextParams params);
+ Status init(OperationContext* opCtx, const NamespaceString& nss, TextParams params);
const fts::FTSQuery& getFTSQuery() const final {
return _ftsQuery;
diff --git a/src/mongo/db/matcher/expression_where.cpp b/src/mongo/db/matcher/expression_where.cpp
index af678c9e5bb..c1d1b5d307c 100644
--- a/src/mongo/db/matcher/expression_where.cpp
+++ b/src/mongo/db/matcher/expression_where.cpp
@@ -50,9 +50,9 @@ using std::string;
using std::stringstream;
using stdx::make_unique;
-WhereMatchExpression::WhereMatchExpression(OperationContext* txn, WhereParams params)
- : WhereMatchExpressionBase(std::move(params)), _txn(txn) {
- invariant(_txn != NULL);
+WhereMatchExpression::WhereMatchExpression(OperationContext* opCtx, WhereParams params)
+ : WhereMatchExpressionBase(std::move(params)), _opCtx(opCtx) {
+ invariant(_opCtx != NULL);
_func = 0;
}
@@ -72,7 +72,7 @@ Status WhereMatchExpression::init(StringData dbName) {
AuthorizationSession::get(Client::getCurrent())->getAuthenticatedUserNamesToken();
try {
- _scope = getGlobalScriptEngine()->getPooledScope(_txn, _dbName, "where" + userToken);
+ _scope = getGlobalScriptEngine()->getPooledScope(_opCtx, _dbName, "where" + userToken);
_func = _scope->createFunction(getCode().c_str());
} catch (...) {
return exceptionToStatus();
@@ -112,7 +112,8 @@ unique_ptr<MatchExpression> WhereMatchExpression::shallowClone() const {
WhereParams params;
params.code = getCode();
params.scope = getScope();
- unique_ptr<WhereMatchExpression> e = make_unique<WhereMatchExpression>(_txn, std::move(params));
+ unique_ptr<WhereMatchExpression> e =
+ make_unique<WhereMatchExpression>(_opCtx, std::move(params));
e->init(_dbName);
if (getTag()) {
e->setTag(getTag()->clone());
diff --git a/src/mongo/db/matcher/expression_where.h b/src/mongo/db/matcher/expression_where.h
index 780c169b217..26b55e4f2ea 100644
--- a/src/mongo/db/matcher/expression_where.h
+++ b/src/mongo/db/matcher/expression_where.h
@@ -37,7 +37,7 @@ class OperationContext;
class WhereMatchExpression final : public WhereMatchExpressionBase {
public:
- WhereMatchExpression(OperationContext* txn, WhereParams params);
+ WhereMatchExpression(OperationContext* opCtx, WhereParams params);
Status init(StringData dbName);
@@ -51,7 +51,7 @@ private:
std::unique_ptr<Scope> _scope;
ScriptingFunction _func;
- OperationContext* const _txn;
+ OperationContext* const _opCtx;
};
} // namespace mongo
diff --git a/src/mongo/db/matcher/extensions_callback_real.cpp b/src/mongo/db/matcher/extensions_callback_real.cpp
index 0b7e815ca58..f61928b4e2c 100644
--- a/src/mongo/db/matcher/extensions_callback_real.cpp
+++ b/src/mongo/db/matcher/extensions_callback_real.cpp
@@ -36,8 +36,8 @@
namespace mongo {
-ExtensionsCallbackReal::ExtensionsCallbackReal(OperationContext* txn, const NamespaceString* nss)
- : _txn(txn), _nss(nss) {}
+ExtensionsCallbackReal::ExtensionsCallbackReal(OperationContext* opCtx, const NamespaceString* nss)
+ : _opCtx(opCtx), _nss(nss) {}
StatusWithMatchExpression ExtensionsCallbackReal::parseText(BSONElement text) const {
auto textParams = extractTextMatchExpressionParams(text);
@@ -46,7 +46,7 @@ StatusWithMatchExpression ExtensionsCallbackReal::parseText(BSONElement text) co
}
auto exp = stdx::make_unique<TextMatchExpression>();
- Status status = exp->init(_txn, *_nss, std::move(textParams.getValue()));
+ Status status = exp->init(_opCtx, *_nss, std::move(textParams.getValue()));
if (!status.isOK()) {
return status;
}
@@ -59,7 +59,7 @@ StatusWithMatchExpression ExtensionsCallbackReal::parseWhere(BSONElement where)
return whereParams.getStatus();
}
- auto exp = stdx::make_unique<WhereMatchExpression>(_txn, std::move(whereParams.getValue()));
+ auto exp = stdx::make_unique<WhereMatchExpression>(_opCtx, std::move(whereParams.getValue()));
Status status = exp->init(_nss->db());
if (!status.isOK()) {
return status;
diff --git a/src/mongo/db/matcher/extensions_callback_real.h b/src/mongo/db/matcher/extensions_callback_real.h
index 45b97e4b9c3..2cfbb043597 100644
--- a/src/mongo/db/matcher/extensions_callback_real.h
+++ b/src/mongo/db/matcher/extensions_callback_real.h
@@ -42,13 +42,14 @@ class OperationContext;
class ExtensionsCallbackReal : public ExtensionsCallback {
public:
/**
- * Does not take ownership of 'nss' or 'txn'.
+ * Does not take ownership of 'nss' or 'opCtx'.
*
- * 'nss' must outlive this object. 'txn' must outlive this object also; in addition, 'txn' must
+ * 'nss' must outlive this object. 'opCtx' must outlive this object also; in addition, 'opCtx'
+ * must
* outlive any MatchExpression objects generated by these callbacks (as the generated objects
- * are allowed to keep a reference to 'txn').
+ * are allowed to keep a reference to 'opCtx').
*/
- ExtensionsCallbackReal(OperationContext* txn, const NamespaceString* nss);
+ ExtensionsCallbackReal(OperationContext* opCtx, const NamespaceString* nss);
/**
* Returns a TextMatchExpression, or an error Status if parsing fails.
@@ -61,7 +62,7 @@ public:
StatusWithMatchExpression parseWhere(BSONElement where) const final;
private:
- OperationContext* const _txn;
+ OperationContext* const _opCtx;
const NamespaceString* const _nss;
};
diff --git a/src/mongo/db/op_observer.h b/src/mongo/db/op_observer.h
index 15b63d89c0c..7326a368614 100644
--- a/src/mongo/db/op_observer.h
+++ b/src/mongo/db/op_observer.h
@@ -66,17 +66,17 @@ public:
OpObserver() = default;
virtual ~OpObserver() = default;
- virtual void onCreateIndex(OperationContext* txn,
+ virtual void onCreateIndex(OperationContext* opCtx,
const std::string& ns,
BSONObj indexDoc,
bool fromMigrate) = 0;
- virtual void onInserts(OperationContext* txn,
+ virtual void onInserts(OperationContext* opCtx,
const NamespaceString& ns,
std::vector<BSONObj>::const_iterator begin,
std::vector<BSONObj>::const_iterator end,
bool fromMigrate) = 0;
- virtual void onUpdate(OperationContext* txn, const OplogUpdateEntryArgs& args) = 0;
- virtual CollectionShardingState::DeleteState aboutToDelete(OperationContext* txn,
+ virtual void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) = 0;
+ virtual CollectionShardingState::DeleteState aboutToDelete(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& doc) = 0;
/**
@@ -88,33 +88,34 @@ public:
* so should be ignored by the user as an internal maintenance operation and not a
* real delete.
*/
- virtual void onDelete(OperationContext* txn,
+ virtual void onDelete(OperationContext* opCtx,
const NamespaceString& ns,
CollectionShardingState::DeleteState deleteState,
bool fromMigrate) = 0;
- virtual void onOpMessage(OperationContext* txn, const BSONObj& msgObj) = 0;
- virtual void onCreateCollection(OperationContext* txn,
+ virtual void onOpMessage(OperationContext* opCtx, const BSONObj& msgObj) = 0;
+ virtual void onCreateCollection(OperationContext* opCtx,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj& idIndex) = 0;
- virtual void onCollMod(OperationContext* txn,
+ virtual void onCollMod(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& collModCmd) = 0;
- virtual void onDropDatabase(OperationContext* txn, const std::string& dbName) = 0;
- virtual void onDropCollection(OperationContext* txn, const NamespaceString& collectionName) = 0;
- virtual void onDropIndex(OperationContext* txn,
+ virtual void onDropDatabase(OperationContext* opCtx, const std::string& dbName) = 0;
+ virtual void onDropCollection(OperationContext* opCtx,
+ const NamespaceString& collectionName) = 0;
+ virtual void onDropIndex(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& idxDescriptor) = 0;
- virtual void onRenameCollection(OperationContext* txn,
+ virtual void onRenameCollection(OperationContext* opCtx,
const NamespaceString& fromCollection,
const NamespaceString& toCollection,
bool dropTarget,
bool stayTemp) = 0;
- virtual void onApplyOps(OperationContext* txn,
+ virtual void onApplyOps(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& applyOpCmd) = 0;
- virtual void onEmptyCapped(OperationContext* txn, const NamespaceString& collectionName) = 0;
- virtual void onConvertToCapped(OperationContext* txn,
+ virtual void onEmptyCapped(OperationContext* opCtx, const NamespaceString& collectionName) = 0;
+ virtual void onConvertToCapped(OperationContext* opCtx,
const NamespaceString& collectionName,
double size) = 0;
};
diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp
index 00560851a7a..63501a4af2e 100644
--- a/src/mongo/db/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer_impl.cpp
@@ -45,36 +45,36 @@
namespace mongo {
-void OpObserverImpl::onCreateIndex(OperationContext* txn,
+void OpObserverImpl::onCreateIndex(OperationContext* opCtx,
const std::string& ns,
BSONObj indexDoc,
bool fromMigrate) {
- repl::logOp(txn, "i", ns.c_str(), indexDoc, nullptr, fromMigrate);
- AuthorizationManager::get(txn->getServiceContext())
- ->logOp(txn, "i", ns.c_str(), indexDoc, nullptr);
+ repl::logOp(opCtx, "i", ns.c_str(), indexDoc, nullptr, fromMigrate);
+ AuthorizationManager::get(opCtx->getServiceContext())
+ ->logOp(opCtx, "i", ns.c_str(), indexDoc, nullptr);
- auto css = CollectionShardingState::get(txn, ns);
+ auto css = CollectionShardingState::get(opCtx, ns);
if (!fromMigrate) {
- css->onInsertOp(txn, indexDoc);
+ css->onInsertOp(opCtx, indexDoc);
}
- logOpForDbHash(txn, ns.c_str());
+ logOpForDbHash(opCtx, ns.c_str());
}
-void OpObserverImpl::onInserts(OperationContext* txn,
+void OpObserverImpl::onInserts(OperationContext* opCtx,
const NamespaceString& nss,
std::vector<BSONObj>::const_iterator begin,
std::vector<BSONObj>::const_iterator end,
bool fromMigrate) {
- repl::logOps(txn, "i", nss, begin, end, fromMigrate);
+ repl::logOps(opCtx, "i", nss, begin, end, fromMigrate);
- auto css = CollectionShardingState::get(txn, nss.ns());
+ auto css = CollectionShardingState::get(opCtx, nss.ns());
const char* ns = nss.ns().c_str();
for (auto it = begin; it != end; it++) {
- AuthorizationManager::get(txn->getServiceContext())->logOp(txn, "i", ns, *it, nullptr);
+ AuthorizationManager::get(opCtx->getServiceContext())->logOp(opCtx, "i", ns, *it, nullptr);
if (!fromMigrate) {
- css->onInsertOp(txn, *it);
+ css->onInsertOp(opCtx, *it);
}
}
@@ -84,38 +84,38 @@ void OpObserverImpl::onInserts(OperationContext* txn,
}
}
- logOpForDbHash(txn, ns);
+ logOpForDbHash(opCtx, ns);
if (strstr(ns, ".system.js")) {
- Scope::storedFuncMod(txn);
+ Scope::storedFuncMod(opCtx);
}
if (nss.coll() == DurableViewCatalog::viewsCollectionName()) {
- DurableViewCatalog::onExternalChange(txn, nss);
+ DurableViewCatalog::onExternalChange(opCtx, nss);
}
}
-void OpObserverImpl::onUpdate(OperationContext* txn, const OplogUpdateEntryArgs& args) {
+void OpObserverImpl::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) {
// Do not log a no-op operation; see SERVER-21738
if (args.update.isEmpty()) {
return;
}
- repl::logOp(txn, "u", args.ns.c_str(), args.update, &args.criteria, args.fromMigrate);
- AuthorizationManager::get(txn->getServiceContext())
- ->logOp(txn, "u", args.ns.c_str(), args.update, &args.criteria);
+ repl::logOp(opCtx, "u", args.ns.c_str(), args.update, &args.criteria, args.fromMigrate);
+ AuthorizationManager::get(opCtx->getServiceContext())
+ ->logOp(opCtx, "u", args.ns.c_str(), args.update, &args.criteria);
- auto css = CollectionShardingState::get(txn, args.ns);
+ auto css = CollectionShardingState::get(opCtx, args.ns);
if (!args.fromMigrate) {
- css->onUpdateOp(txn, args.updatedDoc);
+ css->onUpdateOp(opCtx, args.updatedDoc);
}
- logOpForDbHash(txn, args.ns.c_str());
+ logOpForDbHash(opCtx, args.ns.c_str());
if (strstr(args.ns.c_str(), ".system.js")) {
- Scope::storedFuncMod(txn);
+ Scope::storedFuncMod(opCtx);
}
NamespaceString nss(args.ns);
if (nss.coll() == DurableViewCatalog::viewsCollectionName()) {
- DurableViewCatalog::onExternalChange(txn, nss);
+ DurableViewCatalog::onExternalChange(opCtx, nss);
}
if (args.ns == FeatureCompatibilityVersion::kCollection) {
@@ -123,7 +123,7 @@ void OpObserverImpl::onUpdate(OperationContext* txn, const OplogUpdateEntryArgs&
}
}
-CollectionShardingState::DeleteState OpObserverImpl::aboutToDelete(OperationContext* txn,
+CollectionShardingState::DeleteState OpObserverImpl::aboutToDelete(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& doc) {
CollectionShardingState::DeleteState deleteState;
@@ -132,45 +132,45 @@ CollectionShardingState::DeleteState OpObserverImpl::aboutToDelete(OperationCont
deleteState.idDoc = idElement.wrap();
}
- auto css = CollectionShardingState::get(txn, ns.ns());
- deleteState.isMigrating = css->isDocumentInMigratingChunk(txn, doc);
+ auto css = CollectionShardingState::get(opCtx, ns.ns());
+ deleteState.isMigrating = css->isDocumentInMigratingChunk(opCtx, doc);
return deleteState;
}
-void OpObserverImpl::onDelete(OperationContext* txn,
+void OpObserverImpl::onDelete(OperationContext* opCtx,
const NamespaceString& ns,
CollectionShardingState::DeleteState deleteState,
bool fromMigrate) {
if (deleteState.idDoc.isEmpty())
return;
- repl::logOp(txn, "d", ns.ns().c_str(), deleteState.idDoc, nullptr, fromMigrate);
- AuthorizationManager::get(txn->getServiceContext())
- ->logOp(txn, "d", ns.ns().c_str(), deleteState.idDoc, nullptr);
+ repl::logOp(opCtx, "d", ns.ns().c_str(), deleteState.idDoc, nullptr, fromMigrate);
+ AuthorizationManager::get(opCtx->getServiceContext())
+ ->logOp(opCtx, "d", ns.ns().c_str(), deleteState.idDoc, nullptr);
- auto css = CollectionShardingState::get(txn, ns.ns());
+ auto css = CollectionShardingState::get(opCtx, ns.ns());
if (!fromMigrate) {
- css->onDeleteOp(txn, deleteState);
+ css->onDeleteOp(opCtx, deleteState);
}
- logOpForDbHash(txn, ns.ns().c_str());
+ logOpForDbHash(opCtx, ns.ns().c_str());
if (ns.coll() == "system.js") {
- Scope::storedFuncMod(txn);
+ Scope::storedFuncMod(opCtx);
}
if (ns.coll() == DurableViewCatalog::viewsCollectionName()) {
- DurableViewCatalog::onExternalChange(txn, ns);
+ DurableViewCatalog::onExternalChange(opCtx, ns);
}
if (ns.ns() == FeatureCompatibilityVersion::kCollection) {
FeatureCompatibilityVersion::onDelete(deleteState.idDoc);
}
}
-void OpObserverImpl::onOpMessage(OperationContext* txn, const BSONObj& msgObj) {
- repl::logOp(txn, "n", "", msgObj, nullptr, false);
+void OpObserverImpl::onOpMessage(OperationContext* opCtx, const BSONObj& msgObj) {
+ repl::logOp(opCtx, "n", "", msgObj, nullptr, false);
}
-void OpObserverImpl::onCreateCollection(OperationContext* txn,
+void OpObserverImpl::onCreateCollection(OperationContext* opCtx,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj& idIndex) {
@@ -193,14 +193,14 @@ void OpObserverImpl::onCreateCollection(OperationContext* txn,
if (!collectionName.isSystemDotProfile()) {
// do not replicate system.profile modifications
- repl::logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
+ repl::logOp(opCtx, "c", dbName.c_str(), cmdObj, nullptr, false);
}
- getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), cmdObj, nullptr);
- logOpForDbHash(txn, dbName.c_str());
+ getGlobalAuthorizationManager()->logOp(opCtx, "c", dbName.c_str(), cmdObj, nullptr);
+ logOpForDbHash(opCtx, dbName.c_str());
}
-void OpObserverImpl::onCollMod(OperationContext* txn,
+void OpObserverImpl::onCollMod(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& collModCmd) {
BSONElement first = collModCmd.firstElement();
@@ -208,62 +208,62 @@ void OpObserverImpl::onCollMod(OperationContext* txn,
if (!NamespaceString(NamespaceString(dbName).db(), coll).isSystemDotProfile()) {
// do not replicate system.profile modifications
- repl::logOp(txn, "c", dbName.c_str(), collModCmd, nullptr, false);
+ repl::logOp(opCtx, "c", dbName.c_str(), collModCmd, nullptr, false);
}
- getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), collModCmd, nullptr);
- logOpForDbHash(txn, dbName.c_str());
+ getGlobalAuthorizationManager()->logOp(opCtx, "c", dbName.c_str(), collModCmd, nullptr);
+ logOpForDbHash(opCtx, dbName.c_str());
}
-void OpObserverImpl::onDropDatabase(OperationContext* txn, const std::string& dbName) {
+void OpObserverImpl::onDropDatabase(OperationContext* opCtx, const std::string& dbName) {
BSONObj cmdObj = BSON("dropDatabase" << 1);
- repl::logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
+ repl::logOp(opCtx, "c", dbName.c_str(), cmdObj, nullptr, false);
if (NamespaceString(dbName).db() == FeatureCompatibilityVersion::kDatabase) {
FeatureCompatibilityVersion::onDropCollection();
}
- getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), cmdObj, nullptr);
- logOpForDbHash(txn, dbName.c_str());
+ getGlobalAuthorizationManager()->logOp(opCtx, "c", dbName.c_str(), cmdObj, nullptr);
+ logOpForDbHash(opCtx, dbName.c_str());
}
-void OpObserverImpl::onDropCollection(OperationContext* txn,
+void OpObserverImpl::onDropCollection(OperationContext* opCtx,
const NamespaceString& collectionName) {
std::string dbName = collectionName.db().toString() + ".$cmd";
BSONObj cmdObj = BSON("drop" << collectionName.coll().toString());
if (!collectionName.isSystemDotProfile()) {
// do not replicate system.profile modifications
- repl::logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
+ repl::logOp(opCtx, "c", dbName.c_str(), cmdObj, nullptr, false);
}
if (collectionName.coll() == DurableViewCatalog::viewsCollectionName()) {
- DurableViewCatalog::onExternalChange(txn, collectionName);
+ DurableViewCatalog::onExternalChange(opCtx, collectionName);
}
if (collectionName.ns() == FeatureCompatibilityVersion::kCollection) {
FeatureCompatibilityVersion::onDropCollection();
}
- getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), cmdObj, nullptr);
+ getGlobalAuthorizationManager()->logOp(opCtx, "c", dbName.c_str(), cmdObj, nullptr);
- auto css = CollectionShardingState::get(txn, collectionName);
- css->onDropCollection(txn, collectionName);
+ auto css = CollectionShardingState::get(opCtx, collectionName);
+ css->onDropCollection(opCtx, collectionName);
- logOpForDbHash(txn, dbName.c_str());
+ logOpForDbHash(opCtx, dbName.c_str());
}
-void OpObserverImpl::onDropIndex(OperationContext* txn,
+void OpObserverImpl::onDropIndex(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& idxDescriptor) {
- repl::logOp(txn, "c", dbName.c_str(), idxDescriptor, nullptr, false);
+ repl::logOp(opCtx, "c", dbName.c_str(), idxDescriptor, nullptr, false);
- getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), idxDescriptor, nullptr);
- logOpForDbHash(txn, dbName.c_str());
+ getGlobalAuthorizationManager()->logOp(opCtx, "c", dbName.c_str(), idxDescriptor, nullptr);
+ logOpForDbHash(opCtx, dbName.c_str());
}
-void OpObserverImpl::onRenameCollection(OperationContext* txn,
+void OpObserverImpl::onRenameCollection(OperationContext* opCtx,
const NamespaceString& fromCollection,
const NamespaceString& toCollection,
bool dropTarget,
@@ -275,27 +275,27 @@ void OpObserverImpl::onRenameCollection(OperationContext* txn,
<< "dropTarget"
<< dropTarget);
- repl::logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
+ repl::logOp(opCtx, "c", dbName.c_str(), cmdObj, nullptr, false);
if (fromCollection.coll() == DurableViewCatalog::viewsCollectionName() ||
toCollection.coll() == DurableViewCatalog::viewsCollectionName()) {
DurableViewCatalog::onExternalChange(
- txn, NamespaceString(DurableViewCatalog::viewsCollectionName()));
+ opCtx, NamespaceString(DurableViewCatalog::viewsCollectionName()));
}
- getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), cmdObj, nullptr);
- logOpForDbHash(txn, dbName.c_str());
+ getGlobalAuthorizationManager()->logOp(opCtx, "c", dbName.c_str(), cmdObj, nullptr);
+ logOpForDbHash(opCtx, dbName.c_str());
}
-void OpObserverImpl::onApplyOps(OperationContext* txn,
+void OpObserverImpl::onApplyOps(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& applyOpCmd) {
- repl::logOp(txn, "c", dbName.c_str(), applyOpCmd, nullptr, false);
+ repl::logOp(opCtx, "c", dbName.c_str(), applyOpCmd, nullptr, false);
- getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), applyOpCmd, nullptr);
- logOpForDbHash(txn, dbName.c_str());
+ getGlobalAuthorizationManager()->logOp(opCtx, "c", dbName.c_str(), applyOpCmd, nullptr);
+ logOpForDbHash(opCtx, dbName.c_str());
}
-void OpObserverImpl::onConvertToCapped(OperationContext* txn,
+void OpObserverImpl::onConvertToCapped(OperationContext* opCtx,
const NamespaceString& collectionName,
double size) {
std::string dbName = collectionName.db().toString() + ".$cmd";
@@ -303,24 +303,24 @@ void OpObserverImpl::onConvertToCapped(OperationContext* txn,
if (!collectionName.isSystemDotProfile()) {
// do not replicate system.profile modifications
- repl::logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
+ repl::logOp(opCtx, "c", dbName.c_str(), cmdObj, nullptr, false);
}
- getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), cmdObj, nullptr);
- logOpForDbHash(txn, dbName.c_str());
+ getGlobalAuthorizationManager()->logOp(opCtx, "c", dbName.c_str(), cmdObj, nullptr);
+ logOpForDbHash(opCtx, dbName.c_str());
}
-void OpObserverImpl::onEmptyCapped(OperationContext* txn, const NamespaceString& collectionName) {
+void OpObserverImpl::onEmptyCapped(OperationContext* opCtx, const NamespaceString& collectionName) {
std::string dbName = collectionName.db().toString() + ".$cmd";
BSONObj cmdObj = BSON("emptycapped" << collectionName.coll());
if (!collectionName.isSystemDotProfile()) {
// do not replicate system.profile modifications
- repl::logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
+ repl::logOp(opCtx, "c", dbName.c_str(), cmdObj, nullptr, false);
}
- getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), cmdObj, nullptr);
- logOpForDbHash(txn, dbName.c_str());
+ getGlobalAuthorizationManager()->logOp(opCtx, "c", dbName.c_str(), cmdObj, nullptr);
+ logOpForDbHash(opCtx, dbName.c_str());
}
} // namespace mongo
diff --git a/src/mongo/db/op_observer_impl.h b/src/mongo/db/op_observer_impl.h
index d8ccfbd678d..bcd9e45171f 100644
--- a/src/mongo/db/op_observer_impl.h
+++ b/src/mongo/db/op_observer_impl.h
@@ -39,46 +39,46 @@ public:
OpObserverImpl() = default;
virtual ~OpObserverImpl() = default;
- void onCreateIndex(OperationContext* txn,
+ void onCreateIndex(OperationContext* opCtx,
const std::string& ns,
BSONObj indexDoc,
bool fromMigrate) override;
- void onInserts(OperationContext* txn,
+ void onInserts(OperationContext* opCtx,
const NamespaceString& ns,
std::vector<BSONObj>::const_iterator begin,
std::vector<BSONObj>::const_iterator end,
bool fromMigrate) override;
- void onUpdate(OperationContext* txn, const OplogUpdateEntryArgs& args) override;
- CollectionShardingState::DeleteState aboutToDelete(OperationContext* txn,
+ void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) override;
+ CollectionShardingState::DeleteState aboutToDelete(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& doc) override;
- void onDelete(OperationContext* txn,
+ void onDelete(OperationContext* opCtx,
const NamespaceString& ns,
CollectionShardingState::DeleteState deleteState,
bool fromMigrate) override;
- void onOpMessage(OperationContext* txn, const BSONObj& msgObj) override;
- void onCreateCollection(OperationContext* txn,
+ void onOpMessage(OperationContext* opCtx, const BSONObj& msgObj) override;
+ void onCreateCollection(OperationContext* opCtx,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj& idIndex) override;
- void onCollMod(OperationContext* txn,
+ void onCollMod(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& collModCmd) override;
- void onDropDatabase(OperationContext* txn, const std::string& dbName) override;
- void onDropCollection(OperationContext* txn, const NamespaceString& collectionName) override;
- void onDropIndex(OperationContext* txn,
+ void onDropDatabase(OperationContext* opCtx, const std::string& dbName) override;
+ void onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName) override;
+ void onDropIndex(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& idxDescriptor) override;
- void onRenameCollection(OperationContext* txn,
+ void onRenameCollection(OperationContext* opCtx,
const NamespaceString& fromCollection,
const NamespaceString& toCollection,
bool dropTarget,
bool stayTemp) override;
- void onApplyOps(OperationContext* txn,
+ void onApplyOps(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& applyOpCmd) override;
- void onEmptyCapped(OperationContext* txn, const NamespaceString& collectionName);
- void onConvertToCapped(OperationContext* txn,
+ void onEmptyCapped(OperationContext* opCtx, const NamespaceString& collectionName);
+ void onConvertToCapped(OperationContext* opCtx,
const NamespaceString& collectionName,
double size) override;
};
diff --git a/src/mongo/db/op_observer_noop.h b/src/mongo/db/op_observer_noop.h
index f9d1e1d9182..a5c76a04e11 100644
--- a/src/mongo/db/op_observer_noop.h
+++ b/src/mongo/db/op_observer_noop.h
@@ -39,46 +39,46 @@ public:
OpObserverNoop() = default;
virtual ~OpObserverNoop() = default;
- void onCreateIndex(OperationContext* txn,
+ void onCreateIndex(OperationContext* opCtx,
const std::string& ns,
BSONObj indexDoc,
bool fromMigrate) override;
- void onInserts(OperationContext* txn,
+ void onInserts(OperationContext* opCtx,
const NamespaceString& ns,
std::vector<BSONObj>::const_iterator begin,
std::vector<BSONObj>::const_iterator end,
bool fromMigrate) override;
- void onUpdate(OperationContext* txn, const OplogUpdateEntryArgs& args) override;
- CollectionShardingState::DeleteState aboutToDelete(OperationContext* txn,
+ void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) override;
+ CollectionShardingState::DeleteState aboutToDelete(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& doc) override;
- void onDelete(OperationContext* txn,
+ void onDelete(OperationContext* opCtx,
const NamespaceString& ns,
CollectionShardingState::DeleteState deleteState,
bool fromMigrate) override;
- void onOpMessage(OperationContext* txn, const BSONObj& msgObj) override;
- void onCreateCollection(OperationContext* txn,
+ void onOpMessage(OperationContext* opCtx, const BSONObj& msgObj) override;
+ void onCreateCollection(OperationContext* opCtx,
const NamespaceString& collectionName,
const CollectionOptions& options,
const BSONObj& idIndex) override;
- void onCollMod(OperationContext* txn,
+ void onCollMod(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& collModCmd) override;
- void onDropDatabase(OperationContext* txn, const std::string& dbName) override;
- void onDropCollection(OperationContext* txn, const NamespaceString& collectionName) override;
- void onDropIndex(OperationContext* txn,
+ void onDropDatabase(OperationContext* opCtx, const std::string& dbName) override;
+ void onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName) override;
+ void onDropIndex(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& idxDescriptor) override;
- void onRenameCollection(OperationContext* txn,
+ void onRenameCollection(OperationContext* opCtx,
const NamespaceString& fromCollection,
const NamespaceString& toCollection,
bool dropTarget,
bool stayTemp) override;
- void onApplyOps(OperationContext* txn,
+ void onApplyOps(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& applyOpCmd) override;
- void onEmptyCapped(OperationContext* txn, const NamespaceString& collectionName);
- void onConvertToCapped(OperationContext* txn,
+ void onEmptyCapped(OperationContext* opCtx, const NamespaceString& collectionName);
+ void onConvertToCapped(OperationContext* opCtx,
const NamespaceString& collectionName,
double size) override;
};
diff --git a/src/mongo/db/operation_context.h b/src/mongo/db/operation_context.h
index 2c8be360a77..15688438561 100644
--- a/src/mongo/db/operation_context.h
+++ b/src/mongo/db/operation_context.h
@@ -425,47 +425,47 @@ class WriteUnitOfWork {
MONGO_DISALLOW_COPYING(WriteUnitOfWork);
public:
- WriteUnitOfWork(OperationContext* txn)
- : _txn(txn),
+ WriteUnitOfWork(OperationContext* opCtx)
+ : _opCtx(opCtx),
_committed(false),
- _toplevel(txn->_ruState == OperationContext::kNotInUnitOfWork) {
+ _toplevel(opCtx->_ruState == OperationContext::kNotInUnitOfWork) {
uassert(ErrorCodes::IllegalOperation,
"Cannot execute a write operation in read-only mode",
!storageGlobalParams.readOnly);
- _txn->lockState()->beginWriteUnitOfWork();
+ _opCtx->lockState()->beginWriteUnitOfWork();
if (_toplevel) {
- _txn->recoveryUnit()->beginUnitOfWork(_txn);
- _txn->_ruState = OperationContext::kActiveUnitOfWork;
+ _opCtx->recoveryUnit()->beginUnitOfWork(_opCtx);
+ _opCtx->_ruState = OperationContext::kActiveUnitOfWork;
}
}
~WriteUnitOfWork() {
dassert(!storageGlobalParams.readOnly);
if (!_committed) {
- invariant(_txn->_ruState != OperationContext::kNotInUnitOfWork);
+ invariant(_opCtx->_ruState != OperationContext::kNotInUnitOfWork);
if (_toplevel) {
- _txn->recoveryUnit()->abortUnitOfWork();
- _txn->_ruState = OperationContext::kNotInUnitOfWork;
+ _opCtx->recoveryUnit()->abortUnitOfWork();
+ _opCtx->_ruState = OperationContext::kNotInUnitOfWork;
} else {
- _txn->_ruState = OperationContext::kFailedUnitOfWork;
+ _opCtx->_ruState = OperationContext::kFailedUnitOfWork;
}
- _txn->lockState()->endWriteUnitOfWork();
+ _opCtx->lockState()->endWriteUnitOfWork();
}
}
void commit() {
invariant(!_committed);
- invariant(_txn->_ruState == OperationContext::kActiveUnitOfWork);
+ invariant(_opCtx->_ruState == OperationContext::kActiveUnitOfWork);
if (_toplevel) {
- _txn->recoveryUnit()->commitUnitOfWork();
- _txn->_ruState = OperationContext::kNotInUnitOfWork;
+ _opCtx->recoveryUnit()->commitUnitOfWork();
+ _opCtx->_ruState = OperationContext::kNotInUnitOfWork;
}
- _txn->lockState()->endWriteUnitOfWork();
+ _opCtx->lockState()->endWriteUnitOfWork();
_committed = true;
}
private:
- OperationContext* const _txn;
+ OperationContext* const _opCtx;
bool _committed;
bool _toplevel;
@@ -490,16 +490,16 @@ public:
* only read (MODE_IS), or needs to run without other writers (MODE_S) or any other
* operations (MODE_X) on the server.
*/
- ScopedTransaction(OperationContext* txn, LockMode mode) : _txn(txn) {}
+ ScopedTransaction(OperationContext* opCtx, LockMode mode) : _opCtx(opCtx) {}
~ScopedTransaction() {
- if (!_txn->lockState()->isLocked()) {
- _txn->recoveryUnit()->abandonSnapshot();
+ if (!_opCtx->lockState()->isLocked()) {
+ _opCtx->recoveryUnit()->abandonSnapshot();
}
}
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
};
namespace repl {
@@ -511,17 +511,17 @@ class UnreplicatedWritesBlock {
MONGO_DISALLOW_COPYING(UnreplicatedWritesBlock);
public:
- UnreplicatedWritesBlock(OperationContext* txn)
- : _txn(txn), _shouldReplicateWrites(txn->writesAreReplicated()) {
- txn->setReplicatedWrites(false);
+ UnreplicatedWritesBlock(OperationContext* opCtx)
+ : _opCtx(opCtx), _shouldReplicateWrites(opCtx->writesAreReplicated()) {
+ opCtx->setReplicatedWrites(false);
}
~UnreplicatedWritesBlock() {
- _txn->setReplicatedWrites(_shouldReplicateWrites);
+ _opCtx->setReplicatedWrites(_shouldReplicateWrites);
}
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
const bool _shouldReplicateWrites;
};
} // namespace repl
diff --git a/src/mongo/db/operation_context_test.cpp b/src/mongo/db/operation_context_test.cpp
index 06c6575208a..532e25b640a 100644
--- a/src/mongo/db/operation_context_test.cpp
+++ b/src/mongo/db/operation_context_test.cpp
@@ -85,37 +85,37 @@ public:
};
TEST_F(OperationDeadlineTests, OperationDeadlineExpiration) {
- auto txn = client->makeOperationContext();
- txn->setDeadlineAfterNowBy(Seconds{1});
+ auto opCtx = client->makeOperationContext();
+ opCtx->setDeadlineAfterNowBy(Seconds{1});
mockClock->advance(Milliseconds{500});
- ASSERT_OK(txn->checkForInterruptNoAssert());
+ ASSERT_OK(opCtx->checkForInterruptNoAssert());
// 1ms before relative deadline reports no interrupt
mockClock->advance(Milliseconds{499});
- ASSERT_OK(txn->checkForInterruptNoAssert());
+ ASSERT_OK(opCtx->checkForInterruptNoAssert());
// Exactly at deadline reports no interrupt, because setDeadlineAfterNowBy adds one clock
// precision unit to the deadline, to ensure that the deadline does not expire in less than the
// requested amount of time.
mockClock->advance(Milliseconds{1});
- ASSERT_OK(txn->checkForInterruptNoAssert());
+ ASSERT_OK(opCtx->checkForInterruptNoAssert());
// Since the mock clock's precision is 1ms, at test start + 1001 ms, we expect
// checkForInterruptNoAssert to return ExceededTimeLimit.
mockClock->advance(Milliseconds{1});
- ASSERT_EQ(ErrorCodes::ExceededTimeLimit, txn->checkForInterruptNoAssert());
+ ASSERT_EQ(ErrorCodes::ExceededTimeLimit, opCtx->checkForInterruptNoAssert());
// Also at times greater than start + 1001ms, we expect checkForInterruptNoAssert to keep
// returning ExceededTimeLimit.
mockClock->advance(Milliseconds{1});
- ASSERT_EQ(ErrorCodes::ExceededTimeLimit, txn->checkForInterruptNoAssert());
+ ASSERT_EQ(ErrorCodes::ExceededTimeLimit, opCtx->checkForInterruptNoAssert());
}
template <typename D>
void assertLargeRelativeDeadlineLikeInfinity(Client& client, D maxTime) {
- auto txn = client.makeOperationContext();
- txn->setDeadlineAfterNowBy(maxTime);
- ASSERT_FALSE(txn->hasDeadline()) << "Tried to set maxTime to " << maxTime;
+ auto opCtx = client.makeOperationContext();
+ opCtx->setDeadlineAfterNowBy(maxTime);
+ ASSERT_FALSE(opCtx->hasDeadline()) << "Tried to set maxTime to " << maxTime;
}
TEST_F(OperationDeadlineTests, VeryLargeRelativeDeadlinesHours) {
@@ -142,72 +142,73 @@ TEST_F(OperationDeadlineTests, VeryLargeRelativeDeadlinesMicroseconds) {
TEST_F(OperationDeadlineTests, VeryLargeRelativeDeadlinesNanoseconds) {
// Nanoseconds::max() is less than Microseconds::max(), so it is possible to set
// a deadline of that duration.
- auto txn = client->makeOperationContext();
- txn->setDeadlineAfterNowBy(Nanoseconds::max());
- ASSERT_TRUE(txn->hasDeadline());
+ auto opCtx = client->makeOperationContext();
+ opCtx->setDeadlineAfterNowBy(Nanoseconds::max());
+ ASSERT_TRUE(opCtx->hasDeadline());
ASSERT_EQ(mockClock->now() + mockClock->getPrecision() +
duration_cast<Milliseconds>(Nanoseconds::max()),
- txn->getDeadline());
+ opCtx->getDeadline());
}
TEST_F(OperationDeadlineTests, WaitForMaxTimeExpiredCV) {
- auto txn = client->makeOperationContext();
- txn->setDeadlineByDate(mockClock->now());
+ auto opCtx = client->makeOperationContext();
+ opCtx->setDeadlineByDate(mockClock->now());
stdx::mutex m;
stdx::condition_variable cv;
stdx::unique_lock<stdx::mutex> lk(m);
- ASSERT_EQ(ErrorCodes::ExceededTimeLimit, txn->waitForConditionOrInterruptNoAssert(cv, lk));
+ ASSERT_EQ(ErrorCodes::ExceededTimeLimit, opCtx->waitForConditionOrInterruptNoAssert(cv, lk));
}
TEST_F(OperationDeadlineTests, WaitForMaxTimeExpiredCVWithWaitUntilSet) {
- auto txn = client->makeOperationContext();
- txn->setDeadlineByDate(mockClock->now());
+ auto opCtx = client->makeOperationContext();
+ opCtx->setDeadlineByDate(mockClock->now());
stdx::mutex m;
stdx::condition_variable cv;
stdx::unique_lock<stdx::mutex> lk(m);
- ASSERT_EQ(ErrorCodes::ExceededTimeLimit,
- txn->waitForConditionOrInterruptNoAssertUntil(cv, lk, mockClock->now() + Seconds{10})
- .getStatus());
+ ASSERT_EQ(
+ ErrorCodes::ExceededTimeLimit,
+ opCtx->waitForConditionOrInterruptNoAssertUntil(cv, lk, mockClock->now() + Seconds{10})
+ .getStatus());
}
TEST_F(OperationDeadlineTests, WaitForKilledOpCV) {
- auto txn = client->makeOperationContext();
- txn->markKilled();
+ auto opCtx = client->makeOperationContext();
+ opCtx->markKilled();
stdx::mutex m;
stdx::condition_variable cv;
stdx::unique_lock<stdx::mutex> lk(m);
- ASSERT_EQ(ErrorCodes::Interrupted, txn->waitForConditionOrInterruptNoAssert(cv, lk));
+ ASSERT_EQ(ErrorCodes::Interrupted, opCtx->waitForConditionOrInterruptNoAssert(cv, lk));
}
TEST_F(OperationDeadlineTests, WaitForUntilExpiredCV) {
- auto txn = client->makeOperationContext();
+ auto opCtx = client->makeOperationContext();
stdx::mutex m;
stdx::condition_variable cv;
stdx::unique_lock<stdx::mutex> lk(m);
ASSERT(stdx::cv_status::timeout ==
unittest::assertGet(
- txn->waitForConditionOrInterruptNoAssertUntil(cv, lk, mockClock->now())));
+ opCtx->waitForConditionOrInterruptNoAssertUntil(cv, lk, mockClock->now())));
}
TEST_F(OperationDeadlineTests, WaitForUntilExpiredCVWithMaxTimeSet) {
- auto txn = client->makeOperationContext();
- txn->setDeadlineByDate(mockClock->now() + Seconds{10});
+ auto opCtx = client->makeOperationContext();
+ opCtx->setDeadlineByDate(mockClock->now() + Seconds{10});
stdx::mutex m;
stdx::condition_variable cv;
stdx::unique_lock<stdx::mutex> lk(m);
ASSERT(stdx::cv_status::timeout ==
unittest::assertGet(
- txn->waitForConditionOrInterruptNoAssertUntil(cv, lk, mockClock->now())));
+ opCtx->waitForConditionOrInterruptNoAssertUntil(cv, lk, mockClock->now())));
}
TEST_F(OperationDeadlineTests, DuringWaitMaxTimeExpirationDominatesUntilExpiration) {
- auto txn = client->makeOperationContext();
- txn->setDeadlineByDate(mockClock->now());
+ auto opCtx = client->makeOperationContext();
+ opCtx->setDeadlineByDate(mockClock->now());
stdx::mutex m;
stdx::condition_variable cv;
stdx::unique_lock<stdx::mutex> lk(m);
ASSERT(ErrorCodes::ExceededTimeLimit ==
- txn->waitForConditionOrInterruptNoAssertUntil(cv, lk, mockClock->now()));
+ opCtx->waitForConditionOrInterruptNoAssertUntil(cv, lk, mockClock->now()));
}
class ThreadedOperationDeadlineTests : public OperationDeadlineTests {
@@ -225,7 +226,7 @@ public:
bool isSignaled = false;
};
- stdx::future<stdx::cv_status> startWaiterWithUntilAndMaxTime(OperationContext* txn,
+ stdx::future<stdx::cv_status> startWaiterWithUntilAndMaxTime(OperationContext* opCtx,
WaitTestState* state,
Date_t until,
Date_t maxTime) {
@@ -233,15 +234,15 @@ public:
auto barrier = std::make_shared<unittest::Barrier>(2);
auto task = stdx::packaged_task<stdx::cv_status()>([=] {
if (maxTime < Date_t::max()) {
- txn->setDeadlineByDate(maxTime);
+ opCtx->setDeadlineByDate(maxTime);
}
auto predicate = [state] { return state->isSignaled; };
stdx::unique_lock<stdx::mutex> lk(state->mutex);
barrier->countDownAndWait();
if (until < Date_t::max()) {
- return txn->waitForConditionOrInterruptUntil(state->cv, lk, until, predicate);
+ return opCtx->waitForConditionOrInterruptUntil(state->cv, lk, until, predicate);
} else {
- txn->waitForConditionOrInterrupt(state->cv, lk, predicate);
+ opCtx->waitForConditionOrInterrupt(state->cv, lk, predicate);
return stdx::cv_status::no_timeout;
}
});
@@ -253,36 +254,36 @@ public:
// barrier until it does.
stdx::lock_guard<stdx::mutex> lk(state->mutex);
- // Assuming that txn has not already been interrupted and that maxTime and until are
+ // Assuming that opCtx has not already been interrupted and that maxTime and until are
// unexpired, we know that the waiter must be blocked in the condition variable, because it
// held the mutex before we tried to acquire it, and only releases it on condition variable
// wait.
return result;
}
- stdx::future<stdx::cv_status> startWaiter(OperationContext* txn, WaitTestState* state) {
- return startWaiterWithUntilAndMaxTime(txn, state, Date_t::max(), Date_t::max());
+ stdx::future<stdx::cv_status> startWaiter(OperationContext* opCtx, WaitTestState* state) {
+ return startWaiterWithUntilAndMaxTime(opCtx, state, Date_t::max(), Date_t::max());
}
};
TEST_F(ThreadedOperationDeadlineTests, KillArrivesWhileWaiting) {
- auto txn = client->makeOperationContext();
+ auto opCtx = client->makeOperationContext();
WaitTestState state;
- auto waiterResult = startWaiter(txn.get(), &state);
+ auto waiterResult = startWaiter(opCtx.get(), &state);
ASSERT(stdx::future_status::ready !=
waiterResult.wait_for(Milliseconds::zero().toSystemDuration()));
{
- stdx::lock_guard<Client> clientLock(*txn->getClient());
- txn->markKilled();
+ stdx::lock_guard<Client> clientLock(*opCtx->getClient());
+ opCtx->markKilled();
}
ASSERT_THROWS_CODE(waiterResult.get(), DBException, ErrorCodes::Interrupted);
}
TEST_F(ThreadedOperationDeadlineTests, MaxTimeExpiresWhileWaiting) {
- auto txn = client->makeOperationContext();
+ auto opCtx = client->makeOperationContext();
WaitTestState state;
const auto startDate = mockClock->now();
- auto waiterResult = startWaiterWithUntilAndMaxTime(txn.get(),
+ auto waiterResult = startWaiterWithUntilAndMaxTime(opCtx.get(),
&state,
startDate + Seconds{60}, // until
startDate + Seconds{10}); // maxTime
@@ -297,10 +298,10 @@ TEST_F(ThreadedOperationDeadlineTests, MaxTimeExpiresWhileWaiting) {
}
TEST_F(ThreadedOperationDeadlineTests, UntilExpiresWhileWaiting) {
- auto txn = client->makeOperationContext();
+ auto opCtx = client->makeOperationContext();
WaitTestState state;
const auto startDate = mockClock->now();
- auto waiterResult = startWaiterWithUntilAndMaxTime(txn.get(),
+ auto waiterResult = startWaiterWithUntilAndMaxTime(opCtx.get(),
&state,
startDate + Seconds{10}, // until
startDate + Seconds{60}); // maxTime
@@ -315,9 +316,9 @@ TEST_F(ThreadedOperationDeadlineTests, UntilExpiresWhileWaiting) {
}
TEST_F(ThreadedOperationDeadlineTests, SignalOne) {
- auto txn = client->makeOperationContext();
+ auto opCtx = client->makeOperationContext();
WaitTestState state;
- auto waiterResult = startWaiter(txn.get(), &state);
+ auto waiterResult = startWaiter(opCtx.get(), &state);
ASSERT(stdx::future_status::ready !=
waiterResult.wait_for(Milliseconds::zero().toSystemDuration()))
@@ -351,10 +352,10 @@ TEST_F(ThreadedOperationDeadlineTests, KillOneSignalAnother) {
}
TEST_F(ThreadedOperationDeadlineTests, SignalBeforeUntilExpires) {
- auto txn = client->makeOperationContext();
+ auto opCtx = client->makeOperationContext();
WaitTestState state;
const auto startDate = mockClock->now();
- auto waiterResult = startWaiterWithUntilAndMaxTime(txn.get(),
+ auto waiterResult = startWaiterWithUntilAndMaxTime(opCtx.get(),
&state,
startDate + Seconds{10}, // until
startDate + Seconds{60}); // maxTime
@@ -369,10 +370,10 @@ TEST_F(ThreadedOperationDeadlineTests, SignalBeforeUntilExpires) {
}
TEST_F(ThreadedOperationDeadlineTests, SignalBeforeMaxTimeExpires) {
- auto txn = client->makeOperationContext();
+ auto opCtx = client->makeOperationContext();
WaitTestState state;
const auto startDate = mockClock->now();
- auto waiterResult = startWaiterWithUntilAndMaxTime(txn.get(),
+ auto waiterResult = startWaiterWithUntilAndMaxTime(opCtx.get(),
&state,
startDate + Seconds{60}, // until
startDate + Seconds{10}); // maxTime
diff --git a/src/mongo/db/ops/delete.cpp b/src/mongo/db/ops/delete.cpp
index 7f509308ad4..636a9aa17ee 100644
--- a/src/mongo/db/ops/delete.cpp
+++ b/src/mongo/db/ops/delete.cpp
@@ -44,7 +44,7 @@ namespace mongo {
justOne: stop after 1 match
god: allow access to system namespaces, and don't yield
*/
-long long deleteObjects(OperationContext* txn,
+long long deleteObjects(OperationContext* opCtx,
Collection* collection,
StringData ns,
BSONObj pattern,
@@ -60,20 +60,20 @@ long long deleteObjects(OperationContext* txn,
request.setFromMigrate(fromMigrate);
request.setYieldPolicy(policy);
- ParsedDelete parsedDelete(txn, &request);
+ ParsedDelete parsedDelete(opCtx, &request);
uassertStatusOK(parsedDelete.parseRequest());
- auto client = txn->getClient();
+ auto client = opCtx->getClient();
auto lastOpAtOperationStart = repl::ReplClientInfo::forClient(client).getLastOp();
std::unique_ptr<PlanExecutor> exec = uassertStatusOK(
- getExecutorDelete(txn, &CurOp::get(txn)->debug(), collection, &parsedDelete));
+ getExecutorDelete(opCtx, &CurOp::get(opCtx)->debug(), collection, &parsedDelete));
uassertStatusOK(exec->executePlan());
// No-ops need to reset lastOp in the client, for write concern.
if (repl::ReplClientInfo::forClient(client).getLastOp() == lastOpAtOperationStart) {
- repl::ReplClientInfo::forClient(client).setLastOpToSystemLastOpTime(txn);
+ repl::ReplClientInfo::forClient(client).setLastOpToSystemLastOpTime(opCtx);
}
return DeleteStage::getNumDeleted(*exec);
diff --git a/src/mongo/db/ops/delete.h b/src/mongo/db/ops/delete.h
index f45a2674cb4..bead641a0b8 100644
--- a/src/mongo/db/ops/delete.h
+++ b/src/mongo/db/ops/delete.h
@@ -39,7 +39,7 @@ namespace mongo {
class Database;
class OperationContext;
-long long deleteObjects(OperationContext* txn,
+long long deleteObjects(OperationContext* opCtx,
Collection* collection,
StringData ns,
BSONObj pattern,
diff --git a/src/mongo/db/ops/parsed_delete.cpp b/src/mongo/db/ops/parsed_delete.cpp
index 6508c99c120..854ff7bb480 100644
--- a/src/mongo/db/ops/parsed_delete.cpp
+++ b/src/mongo/db/ops/parsed_delete.cpp
@@ -48,8 +48,8 @@
namespace mongo {
-ParsedDelete::ParsedDelete(OperationContext* txn, const DeleteRequest* request)
- : _txn(txn), _request(request) {}
+ParsedDelete::ParsedDelete(OperationContext* opCtx, const DeleteRequest* request)
+ : _opCtx(opCtx), _request(request) {}
Status ParsedDelete::parseRequest() {
dassert(!_canonicalQuery.get());
@@ -79,7 +79,7 @@ Status ParsedDelete::parseRequest() {
Status ParsedDelete::parseQueryToCQ() {
dassert(!_canonicalQuery.get());
- const ExtensionsCallbackReal extensionsCallback(_txn, &_request->getNamespaceString());
+ const ExtensionsCallbackReal extensionsCallback(_opCtx, &_request->getNamespaceString());
// The projection needs to be applied after the delete operation, so we do not specify a
// projection during canonicalization.
@@ -99,7 +99,7 @@ Status ParsedDelete::parseQueryToCQ() {
qr->setLimit(1);
}
- auto statusWithCQ = CanonicalQuery::canonicalize(_txn, std::move(qr), extensionsCallback);
+ auto statusWithCQ = CanonicalQuery::canonicalize(_opCtx, std::move(qr), extensionsCallback);
if (statusWithCQ.isOK()) {
_canonicalQuery = std::move(statusWithCQ.getValue());
diff --git a/src/mongo/db/ops/parsed_delete.h b/src/mongo/db/ops/parsed_delete.h
index 1a36b9b78ea..9cda64718a1 100644
--- a/src/mongo/db/ops/parsed_delete.h
+++ b/src/mongo/db/ops/parsed_delete.h
@@ -63,7 +63,7 @@ public:
* The object pointed to by "request" must stay in scope for the life of the constructed
* ParsedDelete.
*/
- ParsedDelete(OperationContext* txn, const DeleteRequest* request);
+ ParsedDelete(OperationContext* opCtx, const DeleteRequest* request);
/**
* Parses the delete request to a canonical query. On success, the parsed delete can be
@@ -106,7 +106,7 @@ public:
private:
// Transactional context. Not owned by us.
- OperationContext* _txn;
+ OperationContext* _opCtx;
// Unowned pointer to the request object that this executor will process.
const DeleteRequest* const _request;
diff --git a/src/mongo/db/ops/parsed_update.cpp b/src/mongo/db/ops/parsed_update.cpp
index da9eba4bd93..6df14116c1f 100644
--- a/src/mongo/db/ops/parsed_update.cpp
+++ b/src/mongo/db/ops/parsed_update.cpp
@@ -39,8 +39,8 @@
namespace mongo {
-ParsedUpdate::ParsedUpdate(OperationContext* txn, const UpdateRequest* request)
- : _txn(txn), _request(request), _driver(UpdateDriver::Options()), _canonicalQuery() {}
+ParsedUpdate::ParsedUpdate(OperationContext* opCtx, const UpdateRequest* request)
+ : _opCtx(opCtx), _request(request), _driver(UpdateDriver::Options()), _canonicalQuery() {}
Status ParsedUpdate::parseRequest() {
// It is invalid to request that the UpdateStage return the prior or newly-updated version
@@ -59,7 +59,7 @@ Status ParsedUpdate::parseRequest() {
"http://dochub.mongodb.org/core/3.4-feature-compatibility.");
}
- auto collator = CollatorFactoryInterface::get(_txn->getServiceContext())
+ auto collator = CollatorFactoryInterface::get(_opCtx->getServiceContext())
->makeFromBSON(_request->getCollation());
if (!collator.isOK()) {
return collator.getStatus();
@@ -93,7 +93,7 @@ Status ParsedUpdate::parseQuery() {
Status ParsedUpdate::parseQueryToCQ() {
dassert(!_canonicalQuery.get());
- const ExtensionsCallbackReal extensionsCallback(_txn, &_request->getNamespaceString());
+ const ExtensionsCallbackReal extensionsCallback(_opCtx, &_request->getNamespaceString());
// The projection needs to be applied after the update operation, so we do not specify a
// projection during canonicalization.
@@ -113,7 +113,7 @@ Status ParsedUpdate::parseQueryToCQ() {
qr->setLimit(1);
}
- auto statusWithCQ = CanonicalQuery::canonicalize(_txn, std::move(qr), extensionsCallback);
+ auto statusWithCQ = CanonicalQuery::canonicalize(_opCtx, std::move(qr), extensionsCallback);
if (statusWithCQ.isOK()) {
_canonicalQuery = std::move(statusWithCQ.getValue());
}
@@ -129,11 +129,11 @@ Status ParsedUpdate::parseUpdate() {
// Config db docs shouldn't get checked for valid field names since the shard key can have
// a dot (".") in it.
const bool shouldValidate =
- !(!_txn->writesAreReplicated() || ns.isConfigDB() || _request->isFromMigration());
+ !(!_opCtx->writesAreReplicated() || ns.isConfigDB() || _request->isFromMigration());
_driver.setLogOp(true);
- _driver.setModOptions(
- ModifierInterface::Options(!_txn->writesAreReplicated(), shouldValidate, _collator.get()));
+ _driver.setModOptions(ModifierInterface::Options(
+ !_opCtx->writesAreReplicated(), shouldValidate, _collator.get()));
return _driver.parse(_request->getUpdates(), _request->isMulti());
}
diff --git a/src/mongo/db/ops/parsed_update.h b/src/mongo/db/ops/parsed_update.h
index 5547cfc8cfd..7f844a1a166 100644
--- a/src/mongo/db/ops/parsed_update.h
+++ b/src/mongo/db/ops/parsed_update.h
@@ -64,7 +64,7 @@ public:
* The object pointed to by "request" must stay in scope for the life of the constructed
* ParsedUpdate.
*/
- ParsedUpdate(OperationContext* txn, const UpdateRequest* request);
+ ParsedUpdate(OperationContext* opCtx, const UpdateRequest* request);
/**
* Parses the update request to a canonical query and an update driver. On success, the
@@ -138,7 +138,7 @@ private:
Status parseUpdate();
// Unowned pointer to the transactional context.
- OperationContext* _txn;
+ OperationContext* _opCtx;
// Unowned pointer to the request object to process.
const UpdateRequest* const _request;
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 54e66be6133..4a02a753a78 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -57,17 +57,17 @@
namespace mongo {
-UpdateResult update(OperationContext* txn, Database* db, const UpdateRequest& request) {
+UpdateResult update(OperationContext* opCtx, Database* db, const UpdateRequest& request) {
invariant(db);
// Explain should never use this helper.
invariant(!request.isExplain());
- auto client = txn->getClient();
+ auto client = opCtx->getClient();
auto lastOpAtOperationStart = repl::ReplClientInfo::forClient(client).getLastOp();
ScopeGuard lastOpSetterGuard = MakeObjGuard(repl::ReplClientInfo::forClient(client),
&repl::ReplClientInfo::setLastOpToSystemLastOpTime,
- txn);
+ opCtx);
const NamespaceString& nsString = request.getNamespaceString();
Collection* collection = db->getCollection(nsString.ns());
@@ -82,16 +82,16 @@ UpdateResult update(OperationContext* txn, Database* db, const UpdateRequest& re
if (!collection && request.isUpsert()) {
// We have to have an exclusive lock on the db to be allowed to create the collection.
// Callers should either get an X or create the collection.
- const Locker* locker = txn->lockState();
+ const Locker* locker = opCtx->lockState();
invariant(locker->isW() ||
locker->isLockHeldForMode(ResourceId(RESOURCE_DATABASE, nsString.db()), MODE_X));
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock lk(txn->lockState(), nsString.db(), MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock lk(opCtx->lockState(), nsString.db(), MODE_X);
- const bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, nsString);
+ const bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, nsString);
if (userInitiatedWritesAndNotPrimary) {
uassertStatusOK(Status(ErrorCodes::PrimarySteppedDown,
@@ -99,21 +99,21 @@ UpdateResult update(OperationContext* txn, Database* db, const UpdateRequest& re
<< nsString.ns()
<< " during upsert"));
}
- WriteUnitOfWork wuow(txn);
- collection = db->createCollection(txn, nsString.ns(), CollectionOptions());
+ WriteUnitOfWork wuow(opCtx);
+ collection = db->createCollection(opCtx, nsString.ns(), CollectionOptions());
invariant(collection);
wuow.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollection", nsString.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "createCollection", nsString.ns());
}
// Parse the update, get an executor for it, run the executor, get stats out.
- ParsedUpdate parsedUpdate(txn, &request);
+ ParsedUpdate parsedUpdate(opCtx, &request);
uassertStatusOK(parsedUpdate.parseRequest());
OpDebug* const nullOpDebug = nullptr;
std::unique_ptr<PlanExecutor> exec =
- uassertStatusOK(getExecutorUpdate(txn, nullOpDebug, collection, &parsedUpdate));
+ uassertStatusOK(getExecutorUpdate(opCtx, nullOpDebug, collection, &parsedUpdate));
uassertStatusOK(exec->executePlan());
if (repl::ReplClientInfo::forClient(client).getLastOp() != lastOpAtOperationStart) {
diff --git a/src/mongo/db/ops/update.h b/src/mongo/db/ops/update.h
index 8ff64538a9d..2c5e0fc0f97 100644
--- a/src/mongo/db/ops/update.h
+++ b/src/mongo/db/ops/update.h
@@ -47,7 +47,7 @@ class UpdateDriver;
*
* Caller must hold the appropriate database locks.
*/
-UpdateResult update(OperationContext* txn, Database* db, const UpdateRequest& request);
+UpdateResult update(OperationContext* opCtx, Database* db, const UpdateRequest& request);
/**
* takes the from document and returns a new document
diff --git a/src/mongo/db/ops/update_driver.cpp b/src/mongo/db/ops/update_driver.cpp
index e5a63d64ccb..f94f520f032 100644
--- a/src/mongo/db/ops/update_driver.cpp
+++ b/src/mongo/db/ops/update_driver.cpp
@@ -173,7 +173,7 @@ inline Status UpdateDriver::addAndParse(const modifiertable::ModifierType type,
return Status::OK();
}
-Status UpdateDriver::populateDocumentWithQueryFields(OperationContext* txn,
+Status UpdateDriver::populateDocumentWithQueryFields(OperationContext* opCtx,
const BSONObj& query,
const vector<FieldRef*>* immutablePaths,
mutablebson::Document& doc) const {
@@ -182,7 +182,8 @@ Status UpdateDriver::populateDocumentWithQueryFields(OperationContext* txn,
// $where/$text clauses do not make sense, hence empty ExtensionsCallback.
auto qr = stdx::make_unique<QueryRequest>(NamespaceString(""));
qr->setFilter(query);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), ExtensionsCallbackNoop());
+ auto statusWithCQ =
+ CanonicalQuery::canonicalize(opCtx, std::move(qr), ExtensionsCallbackNoop());
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
diff --git a/src/mongo/db/ops/update_driver.h b/src/mongo/db/ops/update_driver.h
index a9579a89719..73f231f9764 100644
--- a/src/mongo/db/ops/update_driver.h
+++ b/src/mongo/db/ops/update_driver.h
@@ -70,7 +70,7 @@ public:
* Returns Status::OK() if the document can be used. If there are any error or
* conflicts along the way then those errors will be returned.
*/
- Status populateDocumentWithQueryFields(OperationContext* txn,
+ Status populateDocumentWithQueryFields(OperationContext* opCtx,
const BSONObj& query,
const std::vector<FieldRef*>* immutablePaths,
mutablebson::Document& doc) const;
diff --git a/src/mongo/db/ops/update_driver_test.cpp b/src/mongo/db/ops/update_driver_test.cpp
index 794ede90844..c7ef14b3e9f 100644
--- a/src/mongo/db/ops/update_driver_test.cpp
+++ b/src/mongo/db/ops/update_driver_test.cpp
@@ -181,7 +181,7 @@ public:
return *_driverRepl;
}
- OperationContext* txn() {
+ OperationContext* opCtx() {
return _opCtx.get();
}
@@ -254,139 +254,139 @@ static void assertSameFields(const BSONObj& docA, const BSONObj& docB) {
TEST_F(CreateFromQuery, BasicOp) {
BSONObj query = fromjson("{a:1,b:2}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(query, doc().getObject());
}
TEST_F(CreateFromQuery, BasicOpEq) {
BSONObj query = fromjson("{a:{$eq:1}}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(fromjson("{a:1}"), doc().getObject());
}
TEST_F(CreateFromQuery, BasicOpWithId) {
BSONObj query = fromjson("{_id:1,a:1,b:2}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(query, doc().getObject());
}
TEST_F(CreateFromQuery, BasicRepl) {
BSONObj query = fromjson("{a:1,b:2}");
- ASSERT_OK(driverRepl().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverRepl().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(fromjson("{}"), doc().getObject());
}
TEST_F(CreateFromQuery, BasicReplWithId) {
BSONObj query = fromjson("{_id:1,a:1,b:2}");
- ASSERT_OK(driverRepl().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverRepl().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(fromjson("{_id:1}"), doc().getObject());
}
TEST_F(CreateFromQuery, BasicReplWithIdEq) {
BSONObj query = fromjson("{_id:{$eq:1},a:1,b:2}");
- ASSERT_OK(driverRepl().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverRepl().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(fromjson("{_id:1}"), doc().getObject());
}
TEST_F(CreateFromQuery, NoRootIdOp) {
BSONObj query = fromjson("{'_id.a':1,'_id.b':2}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(fromjson("{_id:{a:1,b:2}}"), doc().getObject());
}
TEST_F(CreateFromQuery, NoRootIdRepl) {
BSONObj query = fromjson("{'_id.a':1,'_id.b':2}");
- ASSERT_NOT_OK(driverRepl().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_NOT_OK(driverRepl().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
}
TEST_F(CreateFromQuery, NestedSharedRootOp) {
BSONObj query = fromjson("{'a.c':1,'a.b':{$eq:2}}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(fromjson("{a:{c:1,b:2}}"), doc().getObject());
}
TEST_F(CreateFromQuery, OrQueryOp) {
BSONObj query = fromjson("{$or:[{a:1}]}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(fromjson("{a:1}"), doc().getObject());
}
TEST_F(CreateFromQuery, OrQueryIdRepl) {
BSONObj query = fromjson("{$or:[{_id:1}]}");
- ASSERT_OK(driverRepl().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverRepl().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(fromjson("{_id:1}"), doc().getObject());
}
TEST_F(CreateFromQuery, OrQueryNoExtractOps) {
BSONObj query = fromjson("{$or:[{a:1}, {b:2}]}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(BSONObj(), doc().getObject());
}
TEST_F(CreateFromQuery, OrQueryNoExtractIdRepl) {
BSONObj query = fromjson("{$or:[{_id:1}, {_id:2}]}");
- ASSERT_OK(driverRepl().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverRepl().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(BSONObj(), doc().getObject());
}
TEST_F(CreateFromQuery, AndQueryOp) {
BSONObj query = fromjson("{$and:[{'a.c':1},{'a.b':{$eq:2}}]}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(fromjson("{a:{c:1,b:2}}"), doc().getObject());
}
TEST_F(CreateFromQuery, AndQueryIdRepl) {
BSONObj query = fromjson("{$and:[{_id:1},{a:{$eq:2}}]}");
- ASSERT_OK(driverRepl().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverRepl().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(fromjson("{_id:1}"), doc().getObject());
}
TEST_F(CreateFromQuery, AllArrayOp) {
BSONObj query = fromjson("{a:{$all:[1]}}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(fromjson("{a:1}"), doc().getObject());
}
TEST_F(CreateFromQuery, AllArrayIdRepl) {
BSONObj query = fromjson("{_id:{$all:[1]}, b:2}");
- ASSERT_OK(driverRepl().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverRepl().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(fromjson("{_id:1}"), doc().getObject());
}
TEST_F(CreateFromQuery, ConflictFieldsFailOp) {
BSONObj query = fromjson("{a:1,'a.b':1}");
- ASSERT_NOT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_NOT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
}
TEST_F(CreateFromQuery, ConflictFieldsFailSameValueOp) {
BSONObj query = fromjson("{a:{b:1},'a.b':1}");
- ASSERT_NOT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_NOT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
}
TEST_F(CreateFromQuery, ConflictWithIdRepl) {
BSONObj query = fromjson("{_id:1,'_id.a':1}");
- ASSERT_NOT_OK(driverRepl().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_NOT_OK(driverRepl().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
}
TEST_F(CreateFromQuery, ConflictAndQueryOp) {
BSONObj query = fromjson("{$and:[{a:{b:1}},{'a.b':{$eq:1}}]}");
- ASSERT_NOT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_NOT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
}
TEST_F(CreateFromQuery, ConflictAllMultipleValsOp) {
BSONObj query = fromjson("{a:{$all:[1, 2]}}");
- ASSERT_NOT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_NOT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
}
TEST_F(CreateFromQuery, NoConflictOrQueryOp) {
BSONObj query = fromjson("{$or:[{a:{b:1}},{'a.b':{$eq:1}}]}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(BSONObj(), doc().getObject());
}
TEST_F(CreateFromQuery, ImmutableFieldsOp) {
BSONObj query = fromjson("{$or:[{a:{b:1}},{'a.b':{$eq:1}}]}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(txn(), query, NULL, doc()));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(opCtx(), query, NULL, doc()));
assertSameFields(BSONObj(), doc().getObject());
}
@@ -395,7 +395,7 @@ TEST_F(CreateFromQuery, ShardKeyRepl) {
OwnedPointerVector<FieldRef> immutablePaths;
immutablePaths.push_back(new FieldRef("a"));
ASSERT_OK(driverRepl().populateDocumentWithQueryFields(
- txn(), query, &immutablePaths.vector(), doc()));
+ opCtx(), query, &immutablePaths.vector(), doc()));
assertSameFields(fromjson("{a:1}"), doc().getObject());
}
@@ -405,7 +405,7 @@ TEST_F(CreateFromQuery, NestedShardKeyRepl) {
immutablePaths.push_back(new FieldRef("a"));
immutablePaths.push_back(new FieldRef("b.c"));
ASSERT_OK(driverRepl().populateDocumentWithQueryFields(
- txn(), query, &immutablePaths.vector(), doc()));
+ opCtx(), query, &immutablePaths.vector(), doc()));
assertSameFields(fromjson("{a:1,b:{c:2}}"), doc().getObject());
}
@@ -414,8 +414,8 @@ TEST_F(CreateFromQuery, NestedShardKeyOp) {
OwnedPointerVector<FieldRef> immutablePaths;
immutablePaths.push_back(new FieldRef("a"));
immutablePaths.push_back(new FieldRef("b.c"));
- ASSERT_OK(
- driverOps().populateDocumentWithQueryFields(txn(), query, &immutablePaths.vector(), doc()));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(
+ opCtx(), query, &immutablePaths.vector(), doc()));
assertSameFields(fromjson("{a:1,b:{c:2},d:3}"), doc().getObject());
}
@@ -425,7 +425,7 @@ TEST_F(CreateFromQuery, NotFullShardKeyRepl) {
immutablePaths.push_back(new FieldRef("a"));
immutablePaths.push_back(new FieldRef("b"));
ASSERT_NOT_OK(driverRepl().populateDocumentWithQueryFields(
- txn(), query, &immutablePaths.vector(), doc()));
+ opCtx(), query, &immutablePaths.vector(), doc()));
}
} // unnamed namespace
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index f3f183a60fe..a0d0f067333 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -83,15 +83,15 @@ MONGO_FP_DECLARE(failAllInserts);
MONGO_FP_DECLARE(failAllUpdates);
MONGO_FP_DECLARE(failAllRemoves);
-void finishCurOp(OperationContext* txn, CurOp* curOp) {
+void finishCurOp(OperationContext* opCtx, CurOp* curOp) {
try {
curOp->done();
long long executionTimeMicros = curOp->totalTimeMicros();
curOp->debug().executionTimeMicros = executionTimeMicros;
- recordCurOpMetrics(txn);
- Top::get(txn->getServiceContext())
- .record(txn,
+ recordCurOpMetrics(opCtx);
+ Top::get(opCtx->getServiceContext())
+ .record(opCtx,
curOp->getNS(),
curOp->getLogicalOp(),
1, // "write locked"
@@ -111,16 +111,16 @@ void finishCurOp(OperationContext* txn, CurOp* curOp) {
const bool shouldSample = serverGlobalParams.sampleRate == 1.0
? true
- : txn->getClient()->getPrng().nextCanonicalDouble() < serverGlobalParams.sampleRate;
+ : opCtx->getClient()->getPrng().nextCanonicalDouble() < serverGlobalParams.sampleRate;
if (logAll || (shouldSample && logSlow)) {
Locker::LockerInfo lockerInfo;
- txn->lockState()->getLockerInfo(&lockerInfo);
- log() << curOp->debug().report(txn->getClient(), *curOp, lockerInfo.stats);
+ opCtx->lockState()->getLockerInfo(&lockerInfo);
+ log() << curOp->debug().report(opCtx->getClient(), *curOp, lockerInfo.stats);
}
if (shouldSample && curOp->shouldDBProfile()) {
- profile(txn, CurOp::get(txn)->getNetworkOp());
+ profile(opCtx, CurOp::get(opCtx)->getNetworkOp());
}
} catch (const DBException& ex) {
// We need to ignore all errors here. We don't want a successful op to fail because of a
@@ -135,8 +135,8 @@ void finishCurOp(OperationContext* txn, CurOp* curOp) {
*/
class LastOpFixer {
public:
- LastOpFixer(OperationContext* txn, const NamespaceString& ns)
- : _txn(txn), _isOnLocalDb(ns.isLocal()) {}
+ LastOpFixer(OperationContext* opCtx, const NamespaceString& ns)
+ : _opCtx(opCtx), _isOnLocalDb(ns.isLocal()) {}
~LastOpFixer() {
if (_needToFixLastOp && !_isOnLocalDb) {
@@ -144,7 +144,7 @@ public:
// here. No-op updates will not generate a new lastOp, so we still need the
// guard to fire in that case. Operations on the local DB aren't replicated, so they
// don't need to bump the lastOp.
- replClientInfo().setLastOpToSystemLastOpTime(_txn);
+ replClientInfo().setLastOpToSystemLastOpTime(_opCtx);
}
}
@@ -161,45 +161,45 @@ public:
private:
repl::ReplClientInfo& replClientInfo() {
- return repl::ReplClientInfo::forClient(_txn->getClient());
+ return repl::ReplClientInfo::forClient(_opCtx->getClient());
}
- OperationContext* const _txn;
+ OperationContext* const _opCtx;
bool _needToFixLastOp = true;
const bool _isOnLocalDb;
repl::OpTime _opTimeAtLastOpStart;
};
-void assertCanWrite_inlock(OperationContext* txn, const NamespaceString& ns) {
- uassert(
- ErrorCodes::PrimarySteppedDown,
- str::stream() << "Not primary while writing to " << ns.ns(),
- repl::ReplicationCoordinator::get(txn->getServiceContext())->canAcceptWritesFor(txn, ns));
- CollectionShardingState::get(txn, ns)->checkShardVersionOrThrow(txn);
+void assertCanWrite_inlock(OperationContext* opCtx, const NamespaceString& ns) {
+ uassert(ErrorCodes::PrimarySteppedDown,
+ str::stream() << "Not primary while writing to " << ns.ns(),
+ repl::ReplicationCoordinator::get(opCtx->getServiceContext())
+ ->canAcceptWritesFor(opCtx, ns));
+ CollectionShardingState::get(opCtx, ns)->checkShardVersionOrThrow(opCtx);
}
-void makeCollection(OperationContext* txn, const NamespaceString& ns) {
+void makeCollection(OperationContext* opCtx, const NamespaceString& ns) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- AutoGetOrCreateDb db(txn, ns.db(), MODE_X);
- assertCanWrite_inlock(txn, ns);
+ AutoGetOrCreateDb db(opCtx, ns.db(), MODE_X);
+ assertCanWrite_inlock(opCtx, ns);
if (!db.getDb()->getCollection(ns.ns())) { // someone else may have beat us to it.
- WriteUnitOfWork wuow(txn);
- uassertStatusOK(userCreateNS(txn, db.getDb(), ns.ns(), BSONObj()));
+ WriteUnitOfWork wuow(opCtx);
+ uassertStatusOK(userCreateNS(opCtx, db.getDb(), ns.ns(), BSONObj()));
wuow.commit();
}
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "implicit collection creation", ns.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "implicit collection creation", ns.ns());
}
/**
* Returns true if the operation can continue.
*/
-bool handleError(OperationContext* txn,
+bool handleError(OperationContext* opCtx,
const DBException& ex,
const ParsedWriteOp& wholeOp,
WriteResult* out) {
- LastError::get(txn->getClient()).setLastError(ex.getCode(), ex.getInfo().msg);
- auto& curOp = *CurOp::get(txn);
+ LastError::get(opCtx->getClient()).setLastError(ex.getCode(), ex.getInfo().msg);
+ auto& curOp = *CurOp::get(opCtx);
curOp.debug().exceptionInfo = ex.getInfo();
if (ErrorCodes::isInterruption(ErrorCodes::Error(ex.getCode()))) {
@@ -221,8 +221,8 @@ bool handleError(OperationContext* txn,
<< demangleName(typeid(ex)));
}
- ShardingState::get(txn)->onStaleShardVersion(
- txn, wholeOp.ns, staleConfigException->getVersionReceived());
+ ShardingState::get(opCtx)->onStaleShardVersion(
+ opCtx, wholeOp.ns, staleConfigException->getVersionReceived());
out->staleConfigException =
stdx::make_unique<SendStaleConfigException>(*staleConfigException);
return false;
@@ -234,7 +234,7 @@ bool handleError(OperationContext* txn,
} // namespace
-static WriteResult::SingleResult createIndex(OperationContext* txn,
+static WriteResult::SingleResult createIndex(OperationContext* opCtx,
const NamespaceString& systemIndexes,
const BSONObj& spec) {
BSONElement nsElement = spec["ns"];
@@ -264,7 +264,7 @@ static WriteResult::SingleResult createIndex(OperationContext* txn,
.done();
rpc::CommandRequest cmdRequest(&cmdRequestMsg);
rpc::CommandReplyBuilder cmdReplyBuilder;
- Command::findCommand("createIndexes")->run(txn, cmdRequest, &cmdReplyBuilder);
+ Command::findCommand("createIndexes")->run(opCtx, cmdRequest, &cmdReplyBuilder);
auto cmdReplyMsg = cmdReplyBuilder.done();
rpc::CommandReply cmdReply(&cmdReplyMsg);
auto cmdResult = cmdReply.getCommandReply();
@@ -273,12 +273,12 @@ static WriteResult::SingleResult createIndex(OperationContext* txn,
// Unlike normal inserts, it is not an error to "insert" a duplicate index.
long long n =
cmdResult["numIndexesAfter"].numberInt() - cmdResult["numIndexesBefore"].numberInt();
- CurOp::get(txn)->debug().ninserted += n;
+ CurOp::get(opCtx)->debug().ninserted += n;
return {n};
}
-static WriteResult performCreateIndexes(OperationContext* txn, const InsertOp& wholeOp) {
+static WriteResult performCreateIndexes(OperationContext* opCtx, const InsertOp& wholeOp) {
// Currently this creates each index independently. We could pass multiple indexes to
// createIndexes, but there is a lot of complexity involved in doing it correctly. For one
// thing, createIndexes only takes indexes to a single collection, but this batch could include
@@ -287,15 +287,15 @@ static WriteResult performCreateIndexes(OperationContext* txn, const InsertOp& w
// errors or stops at the first one. These could theoretically be worked around, but it doesn't
// seem worth it since users that want faster index builds should just use the createIndexes
// command rather than a legacy emulation.
- LastOpFixer lastOpFixer(txn, wholeOp.ns);
+ LastOpFixer lastOpFixer(opCtx, wholeOp.ns);
WriteResult out;
for (auto&& spec : wholeOp.documents) {
try {
lastOpFixer.startingOp();
- out.results.emplace_back(createIndex(txn, wholeOp.ns, spec));
+ out.results.emplace_back(createIndex(opCtx, wholeOp.ns, spec));
lastOpFixer.finishedOpSuccessfully();
} catch (const DBException& ex) {
- const bool canContinue = handleError(txn, ex, wholeOp, &out);
+ const bool canContinue = handleError(opCtx, ex, wholeOp, &out);
if (!canContinue)
break;
}
@@ -303,22 +303,22 @@ static WriteResult performCreateIndexes(OperationContext* txn, const InsertOp& w
return out;
}
-static void insertDocuments(OperationContext* txn,
+static void insertDocuments(OperationContext* opCtx,
Collection* collection,
std::vector<BSONObj>::const_iterator begin,
std::vector<BSONObj>::const_iterator end) {
// Intentionally not using a WRITE_CONFLICT_RETRY_LOOP. That is handled by the caller so it can
// react to oversized batches.
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
uassertStatusOK(collection->insertDocuments(
- txn, begin, end, &CurOp::get(txn)->debug(), /*enforceQuota*/ true));
+ opCtx, begin, end, &CurOp::get(opCtx)->debug(), /*enforceQuota*/ true));
wuow.commit();
}
/**
* Returns true if caller should try to insert more documents. Does nothing else if batch is empty.
*/
-static bool insertBatchAndHandleErrors(OperationContext* txn,
+static bool insertBatchAndHandleErrors(OperationContext* opCtx,
const InsertOp& wholeOp,
const std::vector<BSONObj>& batch,
LastOpFixer* lastOpFixer,
@@ -326,27 +326,27 @@ static bool insertBatchAndHandleErrors(OperationContext* txn,
if (batch.empty())
return true;
- auto& curOp = *CurOp::get(txn);
+ auto& curOp = *CurOp::get(opCtx);
boost::optional<AutoGetCollection> collection;
auto acquireCollection = [&] {
while (true) {
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
if (MONGO_FAIL_POINT(failAllInserts)) {
uasserted(ErrorCodes::InternalError, "failAllInserts failpoint active!");
}
- collection.emplace(txn, wholeOp.ns, MODE_IX);
+ collection.emplace(opCtx, wholeOp.ns, MODE_IX);
if (collection->getCollection())
break;
collection.reset(); // unlock.
- makeCollection(txn, wholeOp.ns);
+ makeCollection(opCtx, wholeOp.ns);
}
curOp.raiseDbProfileLevel(collection->getDb()->getProfilingLevel());
- assertCanWrite_inlock(txn, wholeOp.ns);
+ assertCanWrite_inlock(opCtx, wholeOp.ns);
};
try {
@@ -355,7 +355,7 @@ static bool insertBatchAndHandleErrors(OperationContext* txn,
// First try doing it all together. If all goes well, this is all we need to do.
// See Collection::_insertDocuments for why we do all capped inserts one-at-a-time.
lastOpFixer->startingOp();
- insertDocuments(txn, collection->getCollection(), batch.begin(), batch.end());
+ insertDocuments(opCtx, collection->getCollection(), batch.begin(), batch.end());
lastOpFixer->finishedOpSuccessfully();
globalOpCounters.gotInserts(batch.size());
std::fill_n(
@@ -379,7 +379,7 @@ static bool insertBatchAndHandleErrors(OperationContext* txn,
if (!collection)
acquireCollection();
lastOpFixer->startingOp();
- insertDocuments(txn, collection->getCollection(), it, it + 1);
+ insertDocuments(opCtx, collection->getCollection(), it, it + 1);
lastOpFixer->finishedOpSuccessfully();
out->results.emplace_back(WriteResult::SingleResult{1});
curOp.debug().ninserted++;
@@ -390,9 +390,9 @@ static bool insertBatchAndHandleErrors(OperationContext* txn,
throw;
}
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "insert", wholeOp.ns.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "insert", wholeOp.ns.ns());
} catch (const DBException& ex) {
- bool canContinue = handleError(txn, ex, wholeOp, out);
+ bool canContinue = handleError(opCtx, ex, wholeOp, out);
if (!canContinue)
return false;
}
@@ -401,15 +401,15 @@ static bool insertBatchAndHandleErrors(OperationContext* txn,
return true;
}
-WriteResult performInserts(OperationContext* txn, const InsertOp& wholeOp) {
- invariant(!txn->lockState()->inAWriteUnitOfWork()); // Does own retries.
- auto& curOp = *CurOp::get(txn);
+WriteResult performInserts(OperationContext* opCtx, const InsertOp& wholeOp) {
+ invariant(!opCtx->lockState()->inAWriteUnitOfWork()); // Does own retries.
+ auto& curOp = *CurOp::get(opCtx);
ON_BLOCK_EXIT([&] {
// This is the only part of finishCurOp we need to do for inserts because they reuse the
// top-level curOp. The rest is handled by the top-level entrypoint.
curOp.done();
- Top::get(txn->getServiceContext())
- .record(txn,
+ Top::get(opCtx->getServiceContext())
+ .record(opCtx,
wholeOp.ns.ns(),
LogicalOp::opInsert,
1 /* write locked*/,
@@ -420,7 +420,7 @@ WriteResult performInserts(OperationContext* txn, const InsertOp& wholeOp) {
});
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp.setNS_inlock(wholeOp.ns.ns());
curOp.setLogicalOp_inlock(LogicalOp::opInsert);
curOp.ensureStarted();
@@ -430,11 +430,11 @@ WriteResult performInserts(OperationContext* txn, const InsertOp& wholeOp) {
uassertStatusOK(userAllowedWriteNS(wholeOp.ns));
if (wholeOp.ns.isSystemDotIndexes()) {
- return performCreateIndexes(txn, wholeOp);
+ return performCreateIndexes(opCtx, wholeOp);
}
- DisableDocumentValidationIfTrue docValidationDisabler(txn, wholeOp.bypassDocumentValidation);
- LastOpFixer lastOpFixer(txn, wholeOp.ns);
+ DisableDocumentValidationIfTrue docValidationDisabler(opCtx, wholeOp.bypassDocumentValidation);
+ LastOpFixer lastOpFixer(opCtx, wholeOp.ns);
WriteResult out;
out.results.reserve(wholeOp.documents.size());
@@ -446,7 +446,7 @@ WriteResult performInserts(OperationContext* txn, const InsertOp& wholeOp) {
for (auto&& doc : wholeOp.documents) {
const bool isLastDoc = (&doc == &wholeOp.documents.back());
- auto fixedDoc = fixDocumentForInsert(txn->getServiceContext(), doc);
+ auto fixedDoc = fixDocumentForInsert(opCtx->getServiceContext(), doc);
if (!fixedDoc.isOK()) {
// Handled after we insert anything in the batch to be sure we report errors in the
// correct order. In an ordered insert, if one of the docs ahead of us fails, we should
@@ -458,14 +458,14 @@ WriteResult performInserts(OperationContext* txn, const InsertOp& wholeOp) {
continue; // Add more to batch before inserting.
}
- bool canContinue = insertBatchAndHandleErrors(txn, wholeOp, batch, &lastOpFixer, &out);
+ bool canContinue = insertBatchAndHandleErrors(opCtx, wholeOp, batch, &lastOpFixer, &out);
batch.clear(); // We won't need the current batch any more.
bytesInBatch = 0;
if (canContinue && !fixedDoc.isOK()) {
globalOpCounters.gotInsert();
canContinue = handleError(
- txn,
+ opCtx,
UserException(fixedDoc.getStatus().code(), fixedDoc.getStatus().reason()),
wholeOp,
&out);
@@ -478,13 +478,13 @@ WriteResult performInserts(OperationContext* txn, const InsertOp& wholeOp) {
return out;
}
-static WriteResult::SingleResult performSingleUpdateOp(OperationContext* txn,
+static WriteResult::SingleResult performSingleUpdateOp(OperationContext* opCtx,
const NamespaceString& ns,
const UpdateOp::SingleUpdate& op) {
globalOpCounters.gotUpdate();
- auto& curOp = *CurOp::get(txn);
+ auto& curOp = *CurOp::get(opCtx);
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp.setNS_inlock(ns.ns());
curOp.setNetworkOp_inlock(dbUpdate);
curOp.setLogicalOp_inlock(LogicalOp::opUpdate);
@@ -503,18 +503,18 @@ static WriteResult::SingleResult performSingleUpdateOp(OperationContext* txn,
request.setUpsert(op.upsert);
request.setYieldPolicy(PlanExecutor::YIELD_AUTO); // ParsedUpdate overrides this for $isolated.
- ParsedUpdate parsedUpdate(txn, &request);
+ ParsedUpdate parsedUpdate(opCtx, &request);
uassertStatusOK(parsedUpdate.parseRequest());
- ScopedTransaction scopedXact(txn, MODE_IX);
+ ScopedTransaction scopedXact(opCtx, MODE_IX);
boost::optional<AutoGetCollection> collection;
while (true) {
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
if (MONGO_FAIL_POINT(failAllUpdates)) {
uasserted(ErrorCodes::InternalError, "failAllUpdates failpoint active!");
}
- collection.emplace(txn,
+ collection.emplace(opCtx,
ns,
MODE_IX, // DB is always IX, even if collection is X.
parsedUpdate.isIsolated() ? MODE_X : MODE_IX);
@@ -522,21 +522,21 @@ static WriteResult::SingleResult performSingleUpdateOp(OperationContext* txn,
break;
collection.reset(); // unlock.
- makeCollection(txn, ns);
+ makeCollection(opCtx, ns);
}
if (collection->getDb()) {
curOp.raiseDbProfileLevel(collection->getDb()->getProfilingLevel());
}
- assertCanWrite_inlock(txn, ns);
+ assertCanWrite_inlock(opCtx, ns);
auto exec = uassertStatusOK(
- getExecutorUpdate(txn, &curOp.debug(), collection->getCollection(), &parsedUpdate));
+ getExecutorUpdate(opCtx, &curOp.debug(), collection->getCollection(), &parsedUpdate));
{
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
}
uassertStatusOK(exec->executePlan());
@@ -544,7 +544,7 @@ static WriteResult::SingleResult performSingleUpdateOp(OperationContext* txn,
PlanSummaryStats summary;
Explain::getSummaryStats(*exec, &summary);
if (collection->getCollection()) {
- collection->getCollection()->infoCache()->notifyOfQuery(txn, summary.indexesUsed);
+ collection->getCollection()->infoCache()->notifyOfQuery(opCtx, summary.indexesUsed);
}
if (curOp.shouldDBProfile()) {
@@ -560,37 +560,37 @@ static WriteResult::SingleResult performSingleUpdateOp(OperationContext* txn,
const bool didInsert = !res.upserted.isEmpty();
const long long nMatchedOrInserted = didInsert ? 1 : res.numMatched;
- LastError::get(txn->getClient()).recordUpdate(res.existing, nMatchedOrInserted, res.upserted);
+ LastError::get(opCtx->getClient()).recordUpdate(res.existing, nMatchedOrInserted, res.upserted);
return {nMatchedOrInserted, res.numDocsModified, res.upserted};
}
-WriteResult performUpdates(OperationContext* txn, const UpdateOp& wholeOp) {
- invariant(!txn->lockState()->inAWriteUnitOfWork()); // Does own retries.
+WriteResult performUpdates(OperationContext* opCtx, const UpdateOp& wholeOp) {
+ invariant(!opCtx->lockState()->inAWriteUnitOfWork()); // Does own retries.
uassertStatusOK(userAllowedWriteNS(wholeOp.ns));
- DisableDocumentValidationIfTrue docValidationDisabler(txn, wholeOp.bypassDocumentValidation);
- LastOpFixer lastOpFixer(txn, wholeOp.ns);
+ DisableDocumentValidationIfTrue docValidationDisabler(opCtx, wholeOp.bypassDocumentValidation);
+ LastOpFixer lastOpFixer(opCtx, wholeOp.ns);
WriteResult out;
out.results.reserve(wholeOp.updates.size());
for (auto&& singleOp : wholeOp.updates) {
// TODO: don't create nested CurOp for legacy writes.
// Add Command pointer to the nested CurOp.
- auto& parentCurOp = *CurOp::get(txn);
+ auto& parentCurOp = *CurOp::get(opCtx);
Command* cmd = parentCurOp.getCommand();
- CurOp curOp(txn);
+ CurOp curOp(opCtx);
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp.setCommand_inlock(cmd);
}
- ON_BLOCK_EXIT([&] { finishCurOp(txn, &curOp); });
+ ON_BLOCK_EXIT([&] { finishCurOp(opCtx, &curOp); });
try {
lastOpFixer.startingOp();
- out.results.emplace_back(performSingleUpdateOp(txn, wholeOp.ns, singleOp));
+ out.results.emplace_back(performSingleUpdateOp(opCtx, wholeOp.ns, singleOp));
lastOpFixer.finishedOpSuccessfully();
} catch (const DBException& ex) {
- const bool canContinue = handleError(txn, ex, wholeOp, &out);
+ const bool canContinue = handleError(opCtx, ex, wholeOp, &out);
if (!canContinue)
break;
}
@@ -599,13 +599,13 @@ WriteResult performUpdates(OperationContext* txn, const UpdateOp& wholeOp) {
return out;
}
-static WriteResult::SingleResult performSingleDeleteOp(OperationContext* txn,
+static WriteResult::SingleResult performSingleDeleteOp(OperationContext* opCtx,
const NamespaceString& ns,
const DeleteOp::SingleDelete& op) {
globalOpCounters.gotDelete();
- auto& curOp = *CurOp::get(txn);
+ auto& curOp = *CurOp::get(opCtx);
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp.setNS_inlock(ns.ns());
curOp.setNetworkOp_inlock(dbDelete);
curOp.setLogicalOp_inlock(LogicalOp::opDelete);
@@ -622,17 +622,17 @@ static WriteResult::SingleResult performSingleDeleteOp(OperationContext* txn,
request.setMulti(op.multi);
request.setYieldPolicy(PlanExecutor::YIELD_AUTO); // ParsedDelete overrides this for $isolated.
- ParsedDelete parsedDelete(txn, &request);
+ ParsedDelete parsedDelete(opCtx, &request);
uassertStatusOK(parsedDelete.parseRequest());
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
if (MONGO_FAIL_POINT(failAllRemoves)) {
uasserted(ErrorCodes::InternalError, "failAllRemoves failpoint active!");
}
- ScopedTransaction scopedXact(txn, MODE_IX);
- AutoGetCollection collection(txn,
+ ScopedTransaction scopedXact(opCtx, MODE_IX);
+ AutoGetCollection collection(opCtx,
ns,
MODE_IX, // DB is always IX, even if collection is X.
parsedDelete.isIsolated() ? MODE_X : MODE_IX);
@@ -640,14 +640,14 @@ static WriteResult::SingleResult performSingleDeleteOp(OperationContext* txn,
curOp.raiseDbProfileLevel(collection.getDb()->getProfilingLevel());
}
- assertCanWrite_inlock(txn, ns);
+ assertCanWrite_inlock(opCtx, ns);
auto exec = uassertStatusOK(
- getExecutorDelete(txn, &curOp.debug(), collection.getCollection(), &parsedDelete));
+ getExecutorDelete(opCtx, &curOp.debug(), collection.getCollection(), &parsedDelete));
{
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
}
uassertStatusOK(exec->executePlan());
@@ -657,7 +657,7 @@ static WriteResult::SingleResult performSingleDeleteOp(OperationContext* txn,
PlanSummaryStats summary;
Explain::getSummaryStats(*exec, &summary);
if (collection.getCollection()) {
- collection.getCollection()->infoCache()->notifyOfQuery(txn, summary.indexesUsed);
+ collection.getCollection()->infoCache()->notifyOfQuery(opCtx, summary.indexesUsed);
}
curOp.debug().setPlanSummaryMetrics(summary);
@@ -667,37 +667,37 @@ static WriteResult::SingleResult performSingleDeleteOp(OperationContext* txn,
curOp.debug().execStats = execStatsBob.obj();
}
- LastError::get(txn->getClient()).recordDelete(n);
+ LastError::get(opCtx->getClient()).recordDelete(n);
return {n};
}
-WriteResult performDeletes(OperationContext* txn, const DeleteOp& wholeOp) {
- invariant(!txn->lockState()->inAWriteUnitOfWork()); // Does own retries.
+WriteResult performDeletes(OperationContext* opCtx, const DeleteOp& wholeOp) {
+ invariant(!opCtx->lockState()->inAWriteUnitOfWork()); // Does own retries.
uassertStatusOK(userAllowedWriteNS(wholeOp.ns));
- DisableDocumentValidationIfTrue docValidationDisabler(txn, wholeOp.bypassDocumentValidation);
- LastOpFixer lastOpFixer(txn, wholeOp.ns);
+ DisableDocumentValidationIfTrue docValidationDisabler(opCtx, wholeOp.bypassDocumentValidation);
+ LastOpFixer lastOpFixer(opCtx, wholeOp.ns);
WriteResult out;
out.results.reserve(wholeOp.deletes.size());
for (auto&& singleOp : wholeOp.deletes) {
// TODO: don't create nested CurOp for legacy writes.
// Add Command pointer to the nested CurOp.
- auto& parentCurOp = *CurOp::get(txn);
+ auto& parentCurOp = *CurOp::get(opCtx);
Command* cmd = parentCurOp.getCommand();
- CurOp curOp(txn);
+ CurOp curOp(opCtx);
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp.setCommand_inlock(cmd);
}
- ON_BLOCK_EXIT([&] { finishCurOp(txn, &curOp); });
+ ON_BLOCK_EXIT([&] { finishCurOp(opCtx, &curOp); });
try {
lastOpFixer.startingOp();
- out.results.emplace_back(performSingleDeleteOp(txn, wholeOp.ns, singleOp));
+ out.results.emplace_back(performSingleDeleteOp(opCtx, wholeOp.ns, singleOp));
lastOpFixer.finishedOpSuccessfully();
} catch (const DBException& ex) {
- const bool canContinue = handleError(txn, ex, wholeOp, &out);
+ const bool canContinue = handleError(opCtx, ex, wholeOp, &out);
if (!canContinue)
break;
}
diff --git a/src/mongo/db/ops/write_ops_exec.h b/src/mongo/db/ops/write_ops_exec.h
index 49d3d2e0cf1..f67a8cee657 100644
--- a/src/mongo/db/ops/write_ops_exec.h
+++ b/src/mongo/db/ops/write_ops_exec.h
@@ -76,8 +76,8 @@ struct WriteResult {
* exception being thrown from these functions. Callers are responsible for managing LastError in
* that case. This should generally be combined with LastError handling from parse failures.
*/
-WriteResult performInserts(OperationContext* txn, const InsertOp& op);
-WriteResult performUpdates(OperationContext* txn, const UpdateOp& op);
-WriteResult performDeletes(OperationContext* txn, const DeleteOp& op);
+WriteResult performInserts(OperationContext* opCtx, const InsertOp& op);
+WriteResult performUpdates(OperationContext* opCtx, const UpdateOp& op);
+WriteResult performDeletes(OperationContext* opCtx, const DeleteOp& op);
} // namespace mongo
diff --git a/src/mongo/db/pipeline/expression_context_for_test.h b/src/mongo/db/pipeline/expression_context_for_test.h
index d093cfe2bfa..234b54e1a10 100644
--- a/src/mongo/db/pipeline/expression_context_for_test.h
+++ b/src/mongo/db/pipeline/expression_context_for_test.h
@@ -42,8 +42,8 @@ class ExpressionContextForTest : public ExpressionContext {
public:
ExpressionContextForTest() = default;
- ExpressionContextForTest(OperationContext* txn, const AggregationRequest& request)
- : ExpressionContext(txn, request, nullptr, {}) {}
+ ExpressionContextForTest(OperationContext* opCtx, const AggregationRequest& request)
+ : ExpressionContext(opCtx, request, nullptr, {}) {}
/**
* Changes the collation used by this ExpressionContext. Must not be changed after parsing a
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index 0de995f29a8..b976e25f865 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -221,7 +221,7 @@ private:
* percentage of the collection.
*/
StatusWith<unique_ptr<PlanExecutor>> createRandomCursorExecutor(Collection* collection,
- OperationContext* txn,
+ OperationContext* opCtx,
long long sampleSize,
long long numRecords) {
double kMaxSampleRatioForRandCursor = 0.05;
@@ -231,18 +231,19 @@ StatusWith<unique_ptr<PlanExecutor>> createRandomCursorExecutor(Collection* coll
// Attempt to get a random cursor from the RecordStore. If the RecordStore does not support
// random cursors, attempt to get one from the _id index.
- std::unique_ptr<RecordCursor> rsRandCursor = collection->getRecordStore()->getRandomCursor(txn);
+ std::unique_ptr<RecordCursor> rsRandCursor =
+ collection->getRecordStore()->getRandomCursor(opCtx);
auto ws = stdx::make_unique<WorkingSet>();
std::unique_ptr<PlanStage> stage;
if (rsRandCursor) {
- stage = stdx::make_unique<MultiIteratorStage>(txn, ws.get(), collection);
+ stage = stdx::make_unique<MultiIteratorStage>(opCtx, ws.get(), collection);
static_cast<MultiIteratorStage*>(stage.get())->addIterator(std::move(rsRandCursor));
} else {
auto indexCatalog = collection->getIndexCatalog();
- auto indexDescriptor = indexCatalog->findIdIndex(txn);
+ auto indexDescriptor = indexCatalog->findIdIndex(opCtx);
if (!indexDescriptor) {
// There was no _id index.
@@ -250,34 +251,34 @@ StatusWith<unique_ptr<PlanExecutor>> createRandomCursorExecutor(Collection* coll
}
IndexAccessMethod* idIam = indexCatalog->getIndex(indexDescriptor);
- auto idxRandCursor = idIam->newRandomCursor(txn);
+ auto idxRandCursor = idIam->newRandomCursor(opCtx);
if (!idxRandCursor) {
// Storage engine does not support any type of random cursor.
return {nullptr};
}
- auto idxIterator = stdx::make_unique<IndexIteratorStage>(txn,
+ auto idxIterator = stdx::make_unique<IndexIteratorStage>(opCtx,
ws.get(),
collection,
idIam,
indexDescriptor->keyPattern(),
std::move(idxRandCursor));
stage = stdx::make_unique<FetchStage>(
- txn, ws.get(), idxIterator.release(), nullptr, collection);
+ opCtx, ws.get(), idxIterator.release(), nullptr, collection);
}
{
- AutoGetCollection autoColl(txn, collection->ns(), MODE_IS);
+ AutoGetCollection autoColl(opCtx, collection->ns(), MODE_IS);
// If we're in a sharded environment, we need to filter out documents we don't own.
- if (ShardingState::get(txn)->needCollectionMetadata(txn, collection->ns().ns())) {
+ if (ShardingState::get(opCtx)->needCollectionMetadata(opCtx, collection->ns().ns())) {
auto shardFilterStage = stdx::make_unique<ShardFilterStage>(
- txn,
- CollectionShardingState::get(txn, collection->ns())->getMetadata(),
+ opCtx,
+ CollectionShardingState::get(opCtx, collection->ns())->getMetadata(),
ws.get(),
stage.release());
- return PlanExecutor::make(txn,
+ return PlanExecutor::make(opCtx,
std::move(ws),
std::move(shardFilterStage),
collection,
@@ -286,11 +287,11 @@ StatusWith<unique_ptr<PlanExecutor>> createRandomCursorExecutor(Collection* coll
}
return PlanExecutor::make(
- txn, std::move(ws), std::move(stage), collection, PlanExecutor::YIELD_AUTO);
+ opCtx, std::move(ws), std::move(stage), collection, PlanExecutor::YIELD_AUTO);
}
StatusWith<std::unique_ptr<PlanExecutor>> attemptToGetExecutor(
- OperationContext* txn,
+ OperationContext* opCtx,
Collection* collection,
const intrusive_ptr<ExpressionContext>& pExpCtx,
BSONObj queryObj,
@@ -318,7 +319,7 @@ StatusWith<std::unique_ptr<PlanExecutor>> attemptToGetExecutor(
const ExtensionsCallbackReal extensionsCallback(pExpCtx->opCtx, &pExpCtx->ns);
- auto cq = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
+ auto cq = CanonicalQuery::canonicalize(opCtx, std::move(qr), extensionsCallback);
if (!cq.isOK()) {
// Return an error instead of uasserting, since there are cases where the combination of
@@ -330,7 +331,7 @@ StatusWith<std::unique_ptr<PlanExecutor>> attemptToGetExecutor(
}
return getExecutor(
- txn, collection, std::move(cq.getValue()), PlanExecutor::YIELD_AUTO, plannerOpts);
+ opCtx, collection, std::move(cq.getValue()), PlanExecutor::YIELD_AUTO, plannerOpts);
}
} // namespace
@@ -446,7 +447,7 @@ void PipelineD::prepareCursorSource(Collection* collection,
}
StatusWith<std::unique_ptr<PlanExecutor>> PipelineD::prepareExecutor(
- OperationContext* txn,
+ OperationContext* opCtx,
Collection* collection,
const NamespaceString& nss,
const intrusive_ptr<Pipeline>& pipeline,
@@ -479,7 +480,7 @@ StatusWith<std::unique_ptr<PlanExecutor>> PipelineD::prepareExecutor(
// If we are connecting directly to the shard rather than through a mongos, don't filter out
// orphaned documents.
- if (ShardingState::get(txn)->needCollectionMetadata(txn, nss.ns())) {
+ if (ShardingState::get(opCtx)->needCollectionMetadata(opCtx, nss.ns())) {
plannerOpts |= QueryPlannerParams::INCLUDE_SHARD_FILTER;
}
@@ -499,12 +500,18 @@ StatusWith<std::unique_ptr<PlanExecutor>> PipelineD::prepareExecutor(
BSONObj emptyProjection;
if (sortStage) {
// See if the query system can provide a non-blocking sort.
- auto swExecutorSort = attemptToGetExecutor(
- txn, collection, expCtx, queryObj, emptyProjection, *sortObj, aggRequest, plannerOpts);
+ auto swExecutorSort = attemptToGetExecutor(opCtx,
+ collection,
+ expCtx,
+ queryObj,
+ emptyProjection,
+ *sortObj,
+ aggRequest,
+ plannerOpts);
if (swExecutorSort.isOK()) {
// Success! Now see if the query system can also cover the projection.
- auto swExecutorSortAndProj = attemptToGetExecutor(txn,
+ auto swExecutorSortAndProj = attemptToGetExecutor(opCtx,
collection,
expCtx,
queryObj,
@@ -553,7 +560,7 @@ StatusWith<std::unique_ptr<PlanExecutor>> PipelineD::prepareExecutor(
// See if the query system can cover the projection.
auto swExecutorProj = attemptToGetExecutor(
- txn, collection, expCtx, queryObj, *projectionObj, *sortObj, aggRequest, plannerOpts);
+ opCtx, collection, expCtx, queryObj, *projectionObj, *sortObj, aggRequest, plannerOpts);
if (swExecutorProj.isOK()) {
// Success! We have a covered projection.
return std::move(swExecutorProj.getValue());
@@ -568,7 +575,7 @@ StatusWith<std::unique_ptr<PlanExecutor>> PipelineD::prepareExecutor(
*projectionObj = BSONObj();
// If this doesn't work, nothing will.
return attemptToGetExecutor(
- txn, collection, expCtx, queryObj, *projectionObj, *sortObj, aggRequest, plannerOpts);
+ opCtx, collection, expCtx, queryObj, *projectionObj, *sortObj, aggRequest, plannerOpts);
}
void PipelineD::addCursorSource(Collection* collection,
diff --git a/src/mongo/db/pipeline/pipeline_d.h b/src/mongo/db/pipeline/pipeline_d.h
index 2df4dc45036..6117fd14cff 100644
--- a/src/mongo/db/pipeline/pipeline_d.h
+++ b/src/mongo/db/pipeline/pipeline_d.h
@@ -97,7 +97,7 @@ private:
* covered projection.
*/
static StatusWith<std::unique_ptr<PlanExecutor>> prepareExecutor(
- OperationContext* txn,
+ OperationContext* opCtx,
Collection* collection,
const NamespaceString& nss,
const boost::intrusive_ptr<Pipeline>& pipeline,
diff --git a/src/mongo/db/prefetch.cpp b/src/mongo/db/prefetch.cpp
index d698def8a3a..48d36f427d6 100644
--- a/src/mongo/db/prefetch.cpp
+++ b/src/mongo/db/prefetch.cpp
@@ -65,7 +65,7 @@ TimerStats prefetchDocStats;
ServerStatusMetricField<TimerStats> displayPrefetchDocPages("repl.preload.docs", &prefetchDocStats);
// page in pages needed for all index lookups on a given object
-void prefetchIndexPages(OperationContext* txn,
+void prefetchIndexPages(OperationContext* opCtx,
Collection* collection,
const ReplSettings::IndexPrefetchConfig& prefetchConfig,
const BSONObj& obj) {
@@ -80,12 +80,12 @@ void prefetchIndexPages(OperationContext* txn,
// on the update op case, the call to prefetchRecordPages will touch the _id index.
// thus perhaps this option isn't very useful?
try {
- IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex(txn);
+ IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex(opCtx);
if (!desc)
return;
IndexAccessMethod* iam = collection->getIndexCatalog()->getIndex(desc);
invariant(iam);
- iam->touch(txn, obj);
+ iam->touch(opCtx, obj);
} catch (const DBException& e) {
LOG(2) << "ignoring exception in prefetchIndexPages(): " << redact(e);
}
@@ -95,7 +95,7 @@ void prefetchIndexPages(OperationContext* txn,
// indexCount includes all indexes, including ones
// in the process of being built
IndexCatalog::IndexIterator ii =
- collection->getIndexCatalog()->getIndexIterator(txn, true);
+ collection->getIndexCatalog()->getIndexIterator(opCtx, true);
while (ii.more()) {
TimerHolder timer(&prefetchIndexStats);
// This will page in all index pages for the given object.
@@ -103,7 +103,7 @@ void prefetchIndexPages(OperationContext* txn,
IndexDescriptor* desc = ii.next();
IndexAccessMethod* iam = collection->getIndexCatalog()->getIndex(desc);
verify(iam);
- iam->touch(txn, obj);
+ iam->touch(opCtx, obj);
} catch (const DBException& e) {
LOG(2) << "ignoring exception in prefetchIndexPages(): " << redact(e);
}
@@ -116,7 +116,10 @@ void prefetchIndexPages(OperationContext* txn,
}
// page in the data pages for a record associated with an object
-void prefetchRecordPages(OperationContext* txn, Database* db, const char* ns, const BSONObj& obj) {
+void prefetchRecordPages(OperationContext* opCtx,
+ Database* db,
+ const char* ns,
+ const BSONObj& obj) {
BSONElement _id;
if (obj.getObjectID(_id)) {
TimerHolder timer(&prefetchDocStats);
@@ -124,7 +127,7 @@ void prefetchRecordPages(OperationContext* txn, Database* db, const char* ns, co
builder.append(_id);
BSONObj result;
try {
- if (Helpers::findById(txn, db, ns, builder.done(), result)) {
+ if (Helpers::findById(opCtx, db, ns, builder.done(), result)) {
// do we want to use Record::touch() here? it's pretty similar.
// volatile - avoid compiler optimizations for touching a mmap page
volatile char _dummy_char = '\0'; // NOLINT
@@ -144,7 +147,7 @@ void prefetchRecordPages(OperationContext* txn, Database* db, const char* ns, co
} // namespace
// prefetch for an oplog operation
-void prefetchPagesForReplicatedOp(OperationContext* txn, Database* db, const BSONObj& op) {
+void prefetchPagesForReplicatedOp(OperationContext* opCtx, Database* db, const BSONObj& op) {
invariant(db);
const ReplSettings::IndexPrefetchConfig prefetchConfig =
getGlobalReplicationCoordinator()->getIndexPrefetchConfig();
@@ -169,7 +172,7 @@ void prefetchPagesForReplicatedOp(OperationContext* txn, Database* db, const BSO
// This will have to change for engines other than MMAP V1, because they might not have
// means for directly prefetching pages from the collection. For this purpose, acquire S
// lock on the database, instead of optimizing with IS.
- Lock::CollectionLock collLock(txn->lockState(), ns, MODE_S);
+ Lock::CollectionLock collLock(opCtx->lockState(), ns, MODE_S);
Collection* collection = db->getCollection(ns);
if (!collection) {
@@ -193,7 +196,7 @@ void prefetchPagesForReplicatedOp(OperationContext* txn, Database* db, const BSO
// a way to achieve that would be to prefetch the record first, and then afterwards do
// this part.
//
- prefetchIndexPages(txn, collection, prefetchConfig, obj);
+ prefetchIndexPages(opCtx, collection, prefetchConfig, obj);
// do not prefetch the data for inserts; it doesn't exist yet
//
@@ -205,7 +208,7 @@ void prefetchPagesForReplicatedOp(OperationContext* txn, Database* db, const BSO
// do not prefetch the data for capped collections because
// they typically do not have an _id index for findById() to use.
!collection->isCapped()) {
- prefetchRecordPages(txn, db, ns, obj);
+ prefetchRecordPages(opCtx, db, ns, obj);
}
}
@@ -234,7 +237,7 @@ public:
}
}
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const string& name) {
+ virtual void append(OperationContext* opCtx, BSONObjBuilder& b, const string& name) {
b.append(name, _value());
}
diff --git a/src/mongo/db/prefetch.h b/src/mongo/db/prefetch.h
index a37c010f22c..8de85948a05 100644
--- a/src/mongo/db/prefetch.h
+++ b/src/mongo/db/prefetch.h
@@ -34,6 +34,6 @@ class OperationContext;
namespace repl {
// page in possible index and/or data pages for an op from the oplog
-void prefetchPagesForReplicatedOp(OperationContext* txn, Database* db, const BSONObj& op);
+void prefetchPagesForReplicatedOp(OperationContext* opCtx, Database* db, const BSONObj& op);
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/query/canonical_query.cpp b/src/mongo/db/query/canonical_query.cpp
index 4ef22584133..55d1b49f5cb 100644
--- a/src/mongo/db/query/canonical_query.cpp
+++ b/src/mongo/db/query/canonical_query.cpp
@@ -102,19 +102,19 @@ bool matchExpressionLessThan(const MatchExpression* lhs, const MatchExpression*
// static
StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
- OperationContext* txn, const QueryMessage& qm, const ExtensionsCallback& extensionsCallback) {
+ OperationContext* opCtx, const QueryMessage& qm, const ExtensionsCallback& extensionsCallback) {
// Make QueryRequest.
auto qrStatus = QueryRequest::fromLegacyQueryMessage(qm);
if (!qrStatus.isOK()) {
return qrStatus.getStatus();
}
- return CanonicalQuery::canonicalize(txn, std::move(qrStatus.getValue()), extensionsCallback);
+ return CanonicalQuery::canonicalize(opCtx, std::move(qrStatus.getValue()), extensionsCallback);
}
// static
StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
- OperationContext* txn,
+ OperationContext* opCtx,
std::unique_ptr<QueryRequest> qr,
const ExtensionsCallback& extensionsCallback) {
auto qrStatus = qr->validate();
@@ -124,7 +124,7 @@ StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
std::unique_ptr<CollatorInterface> collator;
if (!qr->getCollation().isEmpty()) {
- auto statusWithCollator = CollatorFactoryInterface::get(txn->getServiceContext())
+ auto statusWithCollator = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(qr->getCollation());
if (!statusWithCollator.isOK()) {
return statusWithCollator.getStatus();
@@ -154,7 +154,7 @@ StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
// static
StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
- OperationContext* txn,
+ OperationContext* opCtx,
const CanonicalQuery& baseQuery,
MatchExpression* root,
const ExtensionsCallback& extensionsCallback) {
diff --git a/src/mongo/db/query/canonical_query.h b/src/mongo/db/query/canonical_query.h
index 3ff0ecb2d8a..dfcbaf070f0 100644
--- a/src/mongo/db/query/canonical_query.h
+++ b/src/mongo/db/query/canonical_query.h
@@ -47,13 +47,14 @@ public:
* If parsing succeeds, returns a std::unique_ptr<CanonicalQuery> representing the parsed
* query (which will never be NULL). If parsing fails, returns an error Status.
*
- * 'txn' must point to a valid OperationContext, but 'txn' does not need to outlive the returned
+ * 'opCtx' must point to a valid OperationContext, but 'opCtx' does not need to outlive the
+ * returned
* CanonicalQuery.
*
* Used for legacy find through the OP_QUERY message.
*/
static StatusWith<std::unique_ptr<CanonicalQuery>> canonicalize(
- OperationContext* txn,
+ OperationContext* opCtx,
const QueryMessage& qm,
const ExtensionsCallback& extensionsCallback);
@@ -61,11 +62,12 @@ public:
* If parsing succeeds, returns a std::unique_ptr<CanonicalQuery> representing the parsed
* query (which will never be NULL). If parsing fails, returns an error Status.
*
- * 'txn' must point to a valid OperationContext, but 'txn' does not need to outlive the returned
+ * 'opCtx' must point to a valid OperationContext, but 'opCtx' does not need to outlive the
+ * returned
* CanonicalQuery.
*/
static StatusWith<std::unique_ptr<CanonicalQuery>> canonicalize(
- OperationContext* txn, std::unique_ptr<QueryRequest> qr, const ExtensionsCallback&);
+ OperationContext* opCtx, std::unique_ptr<QueryRequest> qr, const ExtensionsCallback&);
/**
* For testing or for internal clients to use.
@@ -79,7 +81,7 @@ public:
* Does not take ownership of 'root'.
*/
static StatusWith<std::unique_ptr<CanonicalQuery>> canonicalize(
- OperationContext* txn,
+ OperationContext* opCtx,
const CanonicalQuery& baseQuery,
MatchExpression* root,
const ExtensionsCallback& extensionsCallback);
diff --git a/src/mongo/db/query/canonical_query_test.cpp b/src/mongo/db/query/canonical_query_test.cpp
index 292425167f7..a874f1f89e3 100644
--- a/src/mongo/db/query/canonical_query_test.cpp
+++ b/src/mongo/db/query/canonical_query_test.cpp
@@ -322,7 +322,7 @@ TEST(CanonicalQueryTest, IsValidTextAndSnapshot) {
TEST(CanonicalQueryTest, IsValidSortKeyMetaProjection) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Passing a sortKey meta-projection without a sort is an error.
{
@@ -330,7 +330,7 @@ TEST(CanonicalQueryTest, IsValidSortKeyMetaProjection) {
auto qr = assertGet(QueryRequest::makeFromFindCommand(
nss, fromjson("{find: 'testcoll', projection: {foo: {$meta: 'sortKey'}}}"), isExplain));
auto cq = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_NOT_OK(cq.getStatus());
}
@@ -342,7 +342,7 @@ TEST(CanonicalQueryTest, IsValidSortKeyMetaProjection) {
fromjson("{find: 'testcoll', projection: {foo: {$meta: 'sortKey'}}, sort: {bar: 1}}"),
isExplain));
auto cq = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(cq.getStatus());
}
}
@@ -435,12 +435,12 @@ TEST(CanonicalQueryTest, SortTreeNumChildrenComparison) {
*/
unique_ptr<CanonicalQuery> canonicalize(const char* queryStr) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson(queryStr));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
@@ -449,14 +449,14 @@ std::unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
const char* sortStr,
const char* projStr) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson(queryStr));
qr->setSort(fromjson(sortStr));
qr->setProj(fromjson(projStr));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
@@ -561,18 +561,18 @@ TEST(CanonicalQueryTest, NormalizeWithInPreservesCollator) {
TEST(CanonicalQueryTest, CanonicalizeFromBaseQuery) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
const bool isExplain = true;
const std::string cmdStr =
"{find:'bogusns', filter:{$or:[{a:1,b:1},{a:1,c:1}]}, projection:{a:1}, sort:{b:1}}";
auto qr = assertGet(QueryRequest::makeFromFindCommand(nss, fromjson(cmdStr), isExplain));
auto baseCq = assertGet(CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
MatchExpression* firstClauseExpr = baseCq->root()->getChild(0);
auto childCq = assertGet(CanonicalQuery::canonicalize(
- txn.get(), *baseCq, firstClauseExpr, ExtensionsCallbackDisallowExtensions()));
+ opCtx.get(), *baseCq, firstClauseExpr, ExtensionsCallbackDisallowExtensions()));
// Descriptive test. The childCq's filter should be the relevant $or clause, rather than the
// entire query predicate.
@@ -586,55 +586,55 @@ TEST(CanonicalQueryTest, CanonicalizeFromBaseQuery) {
TEST(CanonicalQueryTest, CanonicalQueryFromQRWithNoCollation) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
auto cq = assertGet(CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
ASSERT_TRUE(cq->getCollator() == nullptr);
}
TEST(CanonicalQueryTest, CanonicalQueryFromQRWithCollation) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setCollation(BSON("locale"
<< "reverse"));
auto cq = assertGet(CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
ASSERT_TRUE(CollatorInterface::collatorsMatch(cq->getCollator(), &collator));
}
TEST(CanonicalQueryTest, CanonicalQueryFromBaseQueryWithNoCollation) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson("{$or:[{a:1,b:1},{a:1,c:1}]}"));
auto baseCq = assertGet(CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
MatchExpression* firstClauseExpr = baseCq->root()->getChild(0);
auto childCq = assertGet(CanonicalQuery::canonicalize(
- txn.get(), *baseCq, firstClauseExpr, ExtensionsCallbackDisallowExtensions()));
+ opCtx.get(), *baseCq, firstClauseExpr, ExtensionsCallbackDisallowExtensions()));
ASSERT_TRUE(baseCq->getCollator() == nullptr);
ASSERT_TRUE(childCq->getCollator() == nullptr);
}
TEST(CanonicalQueryTest, CanonicalQueryFromBaseQueryWithCollation) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson("{$or:[{a:1,b:1},{a:1,c:1}]}"));
qr->setCollation(BSON("locale"
<< "reverse"));
auto baseCq = assertGet(CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
MatchExpression* firstClauseExpr = baseCq->root()->getChild(0);
auto childCq = assertGet(CanonicalQuery::canonicalize(
- txn.get(), *baseCq, firstClauseExpr, ExtensionsCallbackDisallowExtensions()));
+ opCtx.get(), *baseCq, firstClauseExpr, ExtensionsCallbackDisallowExtensions()));
ASSERT(baseCq->getCollator());
ASSERT(childCq->getCollator());
ASSERT_TRUE(*(childCq->getCollator()) == *(baseCq->getCollator()));
@@ -642,12 +642,12 @@ TEST(CanonicalQueryTest, CanonicalQueryFromBaseQueryWithCollation) {
TEST(CanonicalQueryTest, SettingCollatorUpdatesCollatorAndMatchExpression) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson("{a: 'foo', b: {$in: ['bar', 'baz']}}"));
auto cq = assertGet(CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
ASSERT_EQUALS(2U, cq->root()->numChildren());
auto firstChild = cq->root()->getChild(0);
auto secondChild = cq->root()->getChild(1);
@@ -663,7 +663,7 @@ TEST(CanonicalQueryTest, SettingCollatorUpdatesCollatorAndMatchExpression) {
ASSERT(!inExpr->getCollator());
unique_ptr<CollatorInterface> collator =
- assertGet(CollatorFactoryInterface::get(txn->getServiceContext())
+ assertGet(CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(BSON("locale"
<< "reverse")));
cq->setCollator(std::move(collator));
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index f60bbe33ae5..d8c9feb8fde 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -80,7 +80,7 @@ bool isCursorAwaitData(const ClientCursor* cursor) {
return cursor->queryOptions() & QueryOption_AwaitData;
}
-bool shouldSaveCursor(OperationContext* txn,
+bool shouldSaveCursor(OperationContext* opCtx,
const Collection* collection,
PlanExecutor::ExecState finalState,
PlanExecutor* exec) {
@@ -100,7 +100,7 @@ bool shouldSaveCursor(OperationContext* txn,
// an empty collection. Right now we do not keep a cursor if the collection
// has zero records.
if (qr.isTailable()) {
- return collection && collection->numRecords(txn) != 0U;
+ return collection && collection->numRecords(opCtx) != 0U;
}
return !exec->isEOF();
@@ -120,25 +120,25 @@ bool shouldSaveCursorGetMore(PlanExecutor::ExecState finalState,
return !exec->isEOF();
}
-void beginQueryOp(OperationContext* txn,
+void beginQueryOp(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& queryObj,
long long ntoreturn,
long long ntoskip) {
- auto curOp = CurOp::get(txn);
+ auto curOp = CurOp::get(opCtx);
curOp->debug().ntoreturn = ntoreturn;
curOp->debug().ntoskip = ntoskip;
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp->setQuery_inlock(queryObj);
curOp->setNS_inlock(nss.ns());
}
-void endQueryOp(OperationContext* txn,
+void endQueryOp(OperationContext* opCtx,
Collection* collection,
const PlanExecutor& exec,
long long numResults,
CursorId cursorId) {
- auto curOp = CurOp::get(txn);
+ auto curOp = CurOp::get(opCtx);
// Fill out basic CurOp query exec properties.
curOp->debug().nreturned = numResults;
@@ -151,7 +151,7 @@ void endQueryOp(OperationContext* txn,
curOp->debug().setPlanSummaryMetrics(summaryStats);
if (collection) {
- collection->infoCache()->notifyOfQuery(txn, summaryStats.indexesUsed);
+ collection->infoCache()->notifyOfQuery(opCtx, summaryStats.indexesUsed);
}
if (curOp->shouldDBProfile()) {
@@ -219,7 +219,7 @@ void generateBatch(int ntoreturn,
/**
* Called by db/instance.cpp. This is the getMore entry point.
*/
-Message getMore(OperationContext* txn,
+Message getMore(OperationContext* opCtx,
const char* ns,
int ntoreturn,
long long cursorid,
@@ -227,7 +227,7 @@ Message getMore(OperationContext* txn,
bool* isCursorAuthorized) {
invariant(ntoreturn >= 0);
- CurOp& curOp = *CurOp::get(txn);
+ CurOp& curOp = *CurOp::get(opCtx);
// For testing, we may want to fail if we receive a getmore.
if (MONGO_FAIL_POINT(failReceivedGetmore)) {
@@ -267,7 +267,7 @@ Message getMore(OperationContext* txn,
// the data within a collection.
cursorManager = CursorManager::getGlobalCursorManager();
} else {
- ctx = stdx::make_unique<AutoGetCollectionOrViewForRead>(txn, nss);
+ ctx = stdx::make_unique<AutoGetCollectionOrViewForRead>(opCtx, nss);
auto viewCtx = static_cast<AutoGetCollectionOrViewForRead*>(ctx.get());
if (viewCtx->getView()) {
uasserted(
@@ -290,7 +290,7 @@ Message getMore(OperationContext* txn,
// reads are allowed is PRIMARY (or master in master/slave). This function uasserts if
// reads are not okay.
Status status =
- repl::getGlobalReplicationCoordinator()->checkCanServeReadsFor_UNSAFE(txn, nss, true);
+ repl::getGlobalReplicationCoordinator()->checkCanServeReadsFor_UNSAFE(opCtx, nss, true);
uassertStatusOK(status);
// A pin performs a CC lookup and if there is a CC, increments the CC's pin value so it
@@ -328,7 +328,7 @@ Message getMore(OperationContext* txn,
*isCursorAuthorized = true;
if (cc->isReadCommitted())
- uassertStatusOK(txn->recoveryUnit()->setReadFromMajorityCommittedSnapshot());
+ uassertStatusOK(opCtx->recoveryUnit()->setReadFromMajorityCommittedSnapshot());
// Reset timeout timer on the cursor since the cursor is still in use.
cc->resetIdleTime();
@@ -338,12 +338,12 @@ Message getMore(OperationContext* txn,
if (cc->getLeftoverMaxTimeMicros() < Microseconds::max()) {
uassert(40136,
"Illegal attempt to set operation deadline within DBDirectClient",
- !txn->getClient()->isInDirectClient());
- txn->setDeadlineAfterNowBy(cc->getLeftoverMaxTimeMicros());
+ !opCtx->getClient()->isInDirectClient());
+ opCtx->setDeadlineAfterNowBy(cc->getLeftoverMaxTimeMicros());
}
- txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
+ opCtx->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
- cc->updateSlaveLocation(txn);
+ cc->updateSlaveLocation(opCtx);
if (cc->isAggCursor()) {
// Agg cursors handle their own locking internally.
@@ -372,12 +372,12 @@ Message getMore(OperationContext* txn,
}
PlanExecutor* exec = cc->getExecutor();
- exec->reattachToOperationContext(txn);
+ exec->reattachToOperationContext(opCtx);
exec->restoreState();
auto planSummary = Explain::getPlanSummary(exec);
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp.setPlanSummary_inlock(planSummary);
// Ensure that the original query object is available in the slow query log, profiler
@@ -414,7 +414,7 @@ Message getMore(OperationContext* txn,
curOp.setExpectedLatencyMs(durationCount<Milliseconds>(timeout));
// Reacquiring locks.
- ctx = make_unique<AutoGetCollectionForRead>(txn, nss);
+ ctx = make_unique<AutoGetCollectionForRead>(opCtx, nss);
exec->restoreState();
// We woke up because either the timed_wait expired, or there was more data. Either
@@ -449,8 +449,9 @@ Message getMore(OperationContext* txn,
// if the cursor is aggregation, we release these locks.
if (cc->isAggCursor()) {
invariant(NULL == ctx.get());
- unpinDBLock = make_unique<Lock::DBLock>(txn->lockState(), nss.db(), MODE_IS);
- unpinCollLock = make_unique<Lock::CollectionLock>(txn->lockState(), nss.ns(), MODE_IS);
+ unpinDBLock = make_unique<Lock::DBLock>(opCtx->lockState(), nss.db(), MODE_IS);
+ unpinCollLock =
+ make_unique<Lock::CollectionLock>(opCtx->lockState(), nss.ns(), MODE_IS);
}
// Our two possible ClientCursorPin cleanup paths are:
@@ -486,7 +487,7 @@ Message getMore(OperationContext* txn,
// If the getmore had a time limit, remaining time is "rolled over" back to the
// cursor (for use by future getmore ops).
- cc->setLeftoverMaxTimeMicros(txn->getRemainingMaxTimeMicros());
+ cc->setLeftoverMaxTimeMicros(opCtx->getRemainingMaxTimeMicros());
}
}
@@ -501,11 +502,11 @@ Message getMore(OperationContext* txn,
return Message(bb.release());
}
-std::string runQuery(OperationContext* txn,
+std::string runQuery(OperationContext* opCtx,
QueryMessage& q,
const NamespaceString& nss,
Message& result) {
- CurOp& curOp = *CurOp::get(txn);
+ CurOp& curOp = *CurOp::get(opCtx);
uassert(ErrorCodes::InvalidNamespace,
str::stream() << "Invalid ns [" << nss.ns() << "]",
@@ -513,11 +514,11 @@ std::string runQuery(OperationContext* txn,
invariant(!nss.isCommand());
// Set CurOp information.
- beginQueryOp(txn, nss, q.query, q.ntoreturn, q.ntoskip);
+ beginQueryOp(opCtx, nss, q.query, q.ntoreturn, q.ntoskip);
// Parse the qm into a CanonicalQuery.
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, q, ExtensionsCallbackReal(txn, &nss));
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx, q, ExtensionsCallbackReal(opCtx, &nss));
if (!statusWithCQ.isOK()) {
uasserted(17287,
str::stream() << "Can't canonicalize query: "
@@ -530,7 +531,7 @@ std::string runQuery(OperationContext* txn,
LOG(2) << "Running query: " << redact(cq->toStringShort());
// Parse, canonicalize, plan, transcribe, and get a plan executor.
- AutoGetCollectionOrViewForRead ctx(txn, nss);
+ AutoGetCollectionOrViewForRead ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
if (ctx.getView()) {
@@ -544,7 +545,7 @@ std::string runQuery(OperationContext* txn,
// We have a parsed query. Time to get the execution plan for it.
std::unique_ptr<PlanExecutor> exec = uassertStatusOK(
- getExecutorFind(txn, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO));
+ getExecutorFind(opCtx, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO));
const QueryRequest& qr = exec->getCanonicalQuery()->getQueryRequest();
@@ -578,15 +579,15 @@ std::string runQuery(OperationContext* txn,
if (qr.getMaxTimeMS() > 0) {
uassert(40116,
"Illegal attempt to set operation deadline within DBDirectClient",
- !txn->getClient()->isInDirectClient());
- txn->setDeadlineAfterNowBy(Milliseconds{qr.getMaxTimeMS()});
+ !opCtx->getClient()->isInDirectClient());
+ opCtx->setDeadlineAfterNowBy(Milliseconds{qr.getMaxTimeMS()});
}
- txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
+ opCtx->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
// uassert if we are not on a primary, and not a secondary with SlaveOk query parameter set.
bool slaveOK = qr.isSlaveOk() || qr.hasReadPref();
Status serveReadsStatus =
- repl::getGlobalReplicationCoordinator()->checkCanServeReadsFor_UNSAFE(txn, nss, slaveOK);
+ repl::getGlobalReplicationCoordinator()->checkCanServeReadsFor_UNSAFE(opCtx, nss, slaveOK);
uassertStatusOK(serveReadsStatus);
// Run the query.
@@ -607,7 +608,7 @@ std::string runQuery(OperationContext* txn,
// Get summary info about which plan the executor is using.
{
- stdx::lock_guard<Client> lk(*txn->getClient());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
curOp.setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
}
@@ -657,14 +658,14 @@ std::string runQuery(OperationContext* txn,
// Before saving the cursor, ensure that whatever plan we established happened with the expected
// collection version
- auto css = CollectionShardingState::get(txn, nss);
- css->checkShardVersionOrThrow(txn);
+ auto css = CollectionShardingState::get(opCtx, nss);
+ css->checkShardVersionOrThrow(opCtx);
// Fill out CurOp based on query results. If we have a cursorid, we will fill out CurOp with
// this cursorid later.
long long ccId = 0;
- if (shouldSaveCursor(txn, collection, state, exec.get())) {
+ if (shouldSaveCursor(opCtx, collection, state, exec.get())) {
// We won't use the executor until it's getMore'd.
exec->saveState();
exec->detachFromOperationContext();
@@ -673,7 +674,7 @@ std::string runQuery(OperationContext* txn,
ClientCursorPin pinnedCursor = collection->getCursorManager()->registerCursor(
{exec.release(),
nss.ns(),
- txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(),
+ opCtx->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(),
qr.getOptions(),
upconvertQueryEntry(q.query, qr.nss(), q.ntoreturn, q.ntoskip)});
ccId = pinnedCursor.getCursor()->cursorid();
@@ -695,12 +696,12 @@ std::string runQuery(OperationContext* txn,
// If the query had a time limit, remaining time is "rolled over" to the cursor (for
// use by future getmore ops).
- pinnedCursor.getCursor()->setLeftoverMaxTimeMicros(txn->getRemainingMaxTimeMicros());
+ pinnedCursor.getCursor()->setLeftoverMaxTimeMicros(opCtx->getRemainingMaxTimeMicros());
- endQueryOp(txn, collection, *pinnedCursor.getCursor()->getExecutor(), numResults, ccId);
+ endQueryOp(opCtx, collection, *pinnedCursor.getCursor()->getExecutor(), numResults, ccId);
} else {
LOG(5) << "Not caching executor but returning " << numResults << " results.";
- endQueryOp(txn, collection, *exec, numResults, ccId);
+ endQueryOp(opCtx, collection, *exec, numResults, ccId);
}
// Fill out the output buffer's header.
diff --git a/src/mongo/db/query/find.h b/src/mongo/db/query/find.h
index e6c8160b5cb..2795934ec87 100644
--- a/src/mongo/db/query/find.h
+++ b/src/mongo/db/query/find.h
@@ -58,7 +58,7 @@ bool isCursorAwaitData(const ClientCursor* cursor);
* If false, the caller should close the cursor and indicate this to the client by sending back
* a cursor ID of 0.
*/
-bool shouldSaveCursor(OperationContext* txn,
+bool shouldSaveCursor(OperationContext* opCtx,
const Collection* collection,
PlanExecutor::ExecState finalState,
PlanExecutor* exec);
@@ -75,21 +75,21 @@ bool shouldSaveCursorGetMore(PlanExecutor::ExecState finalState,
bool isTailable);
/**
- * Fills out the CurOp for "txn" with information about this query.
+ * Fills out the CurOp for "opCtx" with information about this query.
*/
-void beginQueryOp(OperationContext* txn,
+void beginQueryOp(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& queryObj,
long long ntoreturn,
long long ntoskip);
/**
- * 1) Fills out CurOp for "txn" with information regarding this query's execution.
+ * 1) Fills out CurOp for "opCtx" with information regarding this query's execution.
* 2) Reports index usage to the CollectionInfoCache.
*
* Uses explain functionality to extract stats from 'exec'.
*/
-void endQueryOp(OperationContext* txn,
+void endQueryOp(OperationContext* opCtx,
Collection* collection,
const PlanExecutor& exec,
long long numResults,
@@ -103,7 +103,7 @@ void endQueryOp(OperationContext* txn,
* The oplog start finding hack requires that 'cq' has a $gt or $gte predicate over
* a field named 'ts'.
*/
-StatusWith<std::unique_ptr<PlanExecutor>> getOplogStartHack(OperationContext* txn,
+StatusWith<std::unique_ptr<PlanExecutor>> getOplogStartHack(OperationContext* opCtx,
Collection* collection,
std::unique_ptr<CanonicalQuery> cq);
@@ -111,7 +111,7 @@ StatusWith<std::unique_ptr<PlanExecutor>> getOplogStartHack(OperationContext* tx
* Called from the getMore entry point in ops/query.cpp.
* Returned buffer is the message to return to the client.
*/
-Message getMore(OperationContext* txn,
+Message getMore(OperationContext* opCtx,
const char* ns,
int ntoreturn,
long long cursorid,
@@ -121,7 +121,7 @@ Message getMore(OperationContext* txn,
/**
* Run the query 'q' and place the result in 'result'.
*/
-std::string runQuery(OperationContext* txn,
+std::string runQuery(OperationContext* opCtx,
QueryMessage& q,
const NamespaceString& ns,
Message& result);
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index eea6597cdce..9aa85a7d81a 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -122,19 +122,19 @@ bool turnIxscanIntoCount(QuerySolution* soln);
} // namespace
-void fillOutPlannerParams(OperationContext* txn,
+void fillOutPlannerParams(OperationContext* opCtx,
Collection* collection,
CanonicalQuery* canonicalQuery,
QueryPlannerParams* plannerParams) {
// If it's not NULL, we may have indices. Access the catalog and fill out IndexEntry(s)
- IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(txn, false);
+ IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(opCtx, false);
while (ii.more()) {
const IndexDescriptor* desc = ii.next();
IndexCatalogEntry* ice = ii.catalogEntry(desc);
plannerParams->indices.push_back(IndexEntry(desc->keyPattern(),
desc->getAccessMethodName(),
- desc->isMultikey(txn),
- ice->getMultikeyPaths(txn),
+ desc->isMultikey(opCtx),
+ ice->getMultikeyPaths(opCtx),
desc->isSparse(),
desc->unique(),
desc->indexName(),
@@ -174,7 +174,8 @@ void fillOutPlannerParams(OperationContext* txn,
// If the caller wants a shard filter, make sure we're actually sharded.
if (plannerParams->options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
- auto collMetadata = CollectionShardingState::get(txn, canonicalQuery->nss())->getMetadata();
+ auto collMetadata =
+ CollectionShardingState::get(opCtx, canonicalQuery->nss())->getMetadata();
if (collMetadata) {
plannerParams->shardKey = collMetadata->getKeyPattern();
} else {
@@ -459,21 +460,21 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
} // namespace
-StatusWith<unique_ptr<PlanExecutor>> getExecutor(OperationContext* txn,
+StatusWith<unique_ptr<PlanExecutor>> getExecutor(OperationContext* opCtx,
Collection* collection,
unique_ptr<CanonicalQuery> canonicalQuery,
PlanExecutor::YieldPolicy yieldPolicy,
size_t plannerOptions) {
unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
StatusWith<PrepareExecutionResult> executionResult =
- prepareExecution(txn, collection, ws.get(), std::move(canonicalQuery), plannerOptions);
+ prepareExecution(opCtx, collection, ws.get(), std::move(canonicalQuery), plannerOptions);
if (!executionResult.isOK()) {
return executionResult.getStatus();
}
invariant(executionResult.getValue().root);
// We must have a tree of stages in order to have a valid plan executor, but the query
// solution may be null.
- return PlanExecutor::make(txn,
+ return PlanExecutor::make(opCtx,
std::move(ws),
std::move(executionResult.getValue().root),
std::move(executionResult.getValue().querySolution),
@@ -506,7 +507,7 @@ mongo::BSONElement extractOplogTsOptime(const mongo::MatchExpression* me) {
return static_cast<const mongo::ComparisonMatchExpression*>(me)->getData();
}
-StatusWith<unique_ptr<PlanExecutor>> getOplogStartHack(OperationContext* txn,
+StatusWith<unique_ptr<PlanExecutor>> getOplogStartHack(OperationContext* opCtx,
Collection* collection,
unique_ptr<CanonicalQuery> cq) {
invariant(collection);
@@ -555,7 +556,7 @@ StatusWith<unique_ptr<PlanExecutor>> getOplogStartHack(OperationContext* txn,
if (tsElem.type() == bsonTimestamp) {
StatusWith<RecordId> goal = oploghack::keyForOptime(tsElem.timestamp());
if (goal.isOK()) {
- startLoc = collection->getRecordStore()->oplogStartHack(txn, goal.getValue());
+ startLoc = collection->getRecordStore()->oplogStartHack(opCtx, goal.getValue());
}
}
@@ -567,10 +568,10 @@ StatusWith<unique_ptr<PlanExecutor>> getOplogStartHack(OperationContext* txn,
// Fallback to trying the OplogStart stage.
unique_ptr<WorkingSet> oplogws = make_unique<WorkingSet>();
unique_ptr<OplogStart> stage =
- make_unique<OplogStart>(txn, collection, tsExpr, oplogws.get());
+ make_unique<OplogStart>(opCtx, collection, tsExpr, oplogws.get());
// Takes ownership of oplogws and stage.
auto statusWithPlanExecutor = PlanExecutor::make(
- txn, std::move(oplogws), std::move(stage), collection, PlanExecutor::YIELD_AUTO);
+ opCtx, std::move(oplogws), std::move(stage), collection, PlanExecutor::YIELD_AUTO);
invariant(statusWithPlanExecutor.isOK());
unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
@@ -580,7 +581,7 @@ StatusWith<unique_ptr<PlanExecutor>> getOplogStartHack(OperationContext* txn,
// This is normal. The start of the oplog is the beginning of the collection.
if (PlanExecutor::IS_EOF == state) {
- return getExecutor(txn, collection, std::move(cq), PlanExecutor::YIELD_AUTO);
+ return getExecutor(opCtx, collection, std::move(cq), PlanExecutor::YIELD_AUTO);
}
// This is not normal. An error was encountered.
@@ -605,29 +606,30 @@ StatusWith<unique_ptr<PlanExecutor>> getOplogStartHack(OperationContext* txn,
}
unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
- unique_ptr<CollectionScan> cs = make_unique<CollectionScan>(txn, params, ws.get(), cq->root());
+ unique_ptr<CollectionScan> cs =
+ make_unique<CollectionScan>(opCtx, params, ws.get(), cq->root());
// Takes ownership of 'ws', 'cs', and 'cq'.
return PlanExecutor::make(
- txn, std::move(ws), std::move(cs), std::move(cq), collection, PlanExecutor::YIELD_AUTO);
+ opCtx, std::move(ws), std::move(cs), std::move(cq), collection, PlanExecutor::YIELD_AUTO);
}
} // namespace
-StatusWith<unique_ptr<PlanExecutor>> getExecutorFind(OperationContext* txn,
+StatusWith<unique_ptr<PlanExecutor>> getExecutorFind(OperationContext* opCtx,
Collection* collection,
const NamespaceString& nss,
unique_ptr<CanonicalQuery> canonicalQuery,
PlanExecutor::YieldPolicy yieldPolicy) {
if (NULL != collection && canonicalQuery->getQueryRequest().isOplogReplay()) {
- return getOplogStartHack(txn, collection, std::move(canonicalQuery));
+ return getOplogStartHack(opCtx, collection, std::move(canonicalQuery));
}
size_t options = QueryPlannerParams::DEFAULT;
- if (ShardingState::get(txn)->needCollectionMetadata(txn, nss.ns())) {
+ if (ShardingState::get(opCtx)->needCollectionMetadata(opCtx, nss.ns())) {
options |= QueryPlannerParams::INCLUDE_SHARD_FILTER;
}
return getExecutor(
- txn, collection, std::move(canonicalQuery), PlanExecutor::YIELD_AUTO, options);
+ opCtx, collection, std::move(canonicalQuery), PlanExecutor::YIELD_AUTO, options);
}
namespace {
@@ -639,7 +641,7 @@ namespace {
* If the projection was valid, then return Status::OK() with a pointer to the newly created
* ProjectionStage. Otherwise, return a status indicating the error reason.
*/
-StatusWith<unique_ptr<PlanStage>> applyProjection(OperationContext* txn,
+StatusWith<unique_ptr<PlanStage>> applyProjection(OperationContext* opCtx,
const NamespaceString& nsString,
CanonicalQuery* cq,
const BSONObj& proj,
@@ -670,11 +672,11 @@ StatusWith<unique_ptr<PlanStage>> applyProjection(OperationContext* txn,
"Cannot use a $meta sortKey projection in findAndModify commands."};
}
- ProjectionStageParams params(ExtensionsCallbackReal(txn, &nsString));
+ ProjectionStageParams params(ExtensionsCallbackReal(opCtx, &nsString));
params.projObj = proj;
params.collator = cq->getCollator();
params.fullExpression = cq->root();
- return {make_unique<ProjectionStage>(txn, params, ws, root.release())};
+ return {make_unique<ProjectionStage>(opCtx, params, ws, root.release())};
}
} // namespace
@@ -683,7 +685,7 @@ StatusWith<unique_ptr<PlanStage>> applyProjection(OperationContext* txn,
// Delete
//
-StatusWith<unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn,
+StatusWith<unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* opCtx,
OpDebug* opDebug,
Collection* collection,
ParsedDelete* parsedDelete) {
@@ -705,8 +707,8 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn,
str::stream() << "cannot remove from a capped collection: " << nss.ns());
}
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, nss);
+ bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, nss);
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::PrimarySteppedDown,
@@ -736,16 +738,17 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn,
LOG(2) << "Collection " << nss.ns() << " does not exist."
<< " Using EOF stage: " << redact(unparsedQuery);
auto deleteStage = make_unique<DeleteStage>(
- txn, deleteStageParams, ws.get(), nullptr, new EOFStage(txn));
- return PlanExecutor::make(txn, std::move(ws), std::move(deleteStage), nss.ns(), policy);
+ opCtx, deleteStageParams, ws.get(), nullptr, new EOFStage(opCtx));
+ return PlanExecutor::make(
+ opCtx, std::move(ws), std::move(deleteStage), nss.ns(), policy);
}
- const IndexDescriptor* descriptor = collection->getIndexCatalog()->findIdIndex(txn);
+ const IndexDescriptor* descriptor = collection->getIndexCatalog()->findIdIndex(opCtx);
// Construct delete request collator.
std::unique_ptr<CollatorInterface> collator;
if (!request->getCollation().isEmpty()) {
- auto statusWithCollator = CollatorFactoryInterface::get(txn->getServiceContext())
+ auto statusWithCollator = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(request->getCollation());
if (!statusWithCollator.isOK()) {
return statusWithCollator.getStatus();
@@ -759,11 +762,11 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn,
request->getProj().isEmpty() && hasCollectionDefaultCollation) {
LOG(2) << "Using idhack: " << redact(unparsedQuery);
- PlanStage* idHackStage =
- new IDHackStage(txn, collection, unparsedQuery["_id"].wrap(), ws.get(), descriptor);
- unique_ptr<DeleteStage> root =
- make_unique<DeleteStage>(txn, deleteStageParams, ws.get(), collection, idHackStage);
- return PlanExecutor::make(txn, std::move(ws), std::move(root), collection, policy);
+ PlanStage* idHackStage = new IDHackStage(
+ opCtx, collection, unparsedQuery["_id"].wrap(), ws.get(), descriptor);
+ unique_ptr<DeleteStage> root = make_unique<DeleteStage>(
+ opCtx, deleteStageParams, ws.get(), collection, idHackStage);
+ return PlanExecutor::make(opCtx, std::move(ws), std::move(root), collection, policy);
}
// If we're here then we don't have a parsed query, but we're also not eligible for
@@ -779,7 +782,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn,
const size_t defaultPlannerOptions = 0;
StatusWith<PrepareExecutionResult> executionResult =
- prepareExecution(txn, collection, ws.get(), std::move(cq), defaultPlannerOptions);
+ prepareExecution(opCtx, collection, ws.get(), std::move(cq), defaultPlannerOptions);
if (!executionResult.isOK()) {
return executionResult.getStatus();
}
@@ -790,14 +793,14 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn,
deleteStageParams.canonicalQuery = cq.get();
invariant(root);
- root = make_unique<DeleteStage>(txn, deleteStageParams, ws.get(), collection, root.release());
+ root = make_unique<DeleteStage>(opCtx, deleteStageParams, ws.get(), collection, root.release());
if (!request->getProj().isEmpty()) {
invariant(request->shouldReturnDeleted());
const bool allowPositional = true;
StatusWith<unique_ptr<PlanStage>> projStatus = applyProjection(
- txn, nss, cq.get(), request->getProj(), allowPositional, ws.get(), std::move(root));
+ opCtx, nss, cq.get(), request->getProj(), allowPositional, ws.get(), std::move(root));
if (!projStatus.isOK()) {
return projStatus.getStatus();
}
@@ -806,7 +809,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn,
// We must have a tree of stages in order to have a valid plan executor, but the query
// solution may be null.
- return PlanExecutor::make(txn,
+ return PlanExecutor::make(opCtx,
std::move(ws),
std::move(root),
std::move(querySolution),
@@ -837,7 +840,7 @@ inline void validateUpdate(const char* ns, const BSONObj& updateobj, const BSONO
} // namespace
-StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
+StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* opCtx,
OpDebug* opDebug,
Collection* collection,
ParsedUpdate* parsedUpdate) {
@@ -871,8 +874,8 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
// If this is a user-issued update, then we want to return an error: you cannot perform
// writes on a secondary. If this is an update to a secondary from the replication system,
// however, then we make an exception and let the write proceed.
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, nsString);
+ bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, nsString);
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::PrimarySteppedDown,
@@ -881,7 +884,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
if (lifecycle) {
lifecycle->setCollection(collection);
- driver->refreshIndexKeys(lifecycle->getIndexKeys(txn));
+ driver->refreshIndexKeys(lifecycle->getIndexKeys(opCtx));
}
const PlanExecutor::YieldPolicy policy = parsedUpdate->yieldPolicy();
@@ -901,12 +904,12 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
LOG(2) << "Collection " << nsString.ns() << " does not exist."
<< " Using EOF stage: " << redact(unparsedQuery);
auto updateStage = make_unique<UpdateStage>(
- txn, updateStageParams, ws.get(), collection, new EOFStage(txn));
+ opCtx, updateStageParams, ws.get(), collection, new EOFStage(opCtx));
return PlanExecutor::make(
- txn, std::move(ws), std::move(updateStage), nsString.ns(), policy);
+ opCtx, std::move(ws), std::move(updateStage), nsString.ns(), policy);
}
- const IndexDescriptor* descriptor = collection->getIndexCatalog()->findIdIndex(txn);
+ const IndexDescriptor* descriptor = collection->getIndexCatalog()->findIdIndex(opCtx);
const bool hasCollectionDefaultCollation = CollatorInterface::collatorsMatch(
parsedUpdate->getCollator(), collection->getDefaultCollator());
@@ -915,11 +918,11 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
request->getProj().isEmpty() && hasCollectionDefaultCollation) {
LOG(2) << "Using idhack: " << redact(unparsedQuery);
- PlanStage* idHackStage =
- new IDHackStage(txn, collection, unparsedQuery["_id"].wrap(), ws.get(), descriptor);
- unique_ptr<UpdateStage> root =
- make_unique<UpdateStage>(txn, updateStageParams, ws.get(), collection, idHackStage);
- return PlanExecutor::make(txn, std::move(ws), std::move(root), collection, policy);
+ PlanStage* idHackStage = new IDHackStage(
+ opCtx, collection, unparsedQuery["_id"].wrap(), ws.get(), descriptor);
+ unique_ptr<UpdateStage> root = make_unique<UpdateStage>(
+ opCtx, updateStageParams, ws.get(), collection, idHackStage);
+ return PlanExecutor::make(opCtx, std::move(ws), std::move(root), collection, policy);
}
// If we're here then we don't have a parsed query, but we're also not eligible for
@@ -935,7 +938,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
const size_t defaultPlannerOptions = 0;
StatusWith<PrepareExecutionResult> executionResult =
- prepareExecution(txn, collection, ws.get(), std::move(cq), defaultPlannerOptions);
+ prepareExecution(opCtx, collection, ws.get(), std::move(cq), defaultPlannerOptions);
if (!executionResult.isOK()) {
return executionResult.getStatus();
}
@@ -947,7 +950,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
updateStageParams.canonicalQuery = cq.get();
root = stdx::make_unique<UpdateStage>(
- txn, updateStageParams, ws.get(), collection, root.release());
+ opCtx, updateStageParams, ws.get(), collection, root.release());
if (!request->getProj().isEmpty()) {
invariant(request->shouldReturnAnyDocs());
@@ -956,7 +959,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
// is invalid to use a positional projection because the query expression need not
// match the array element after the update has been applied.
const bool allowPositional = request->shouldReturnOldDocs();
- StatusWith<unique_ptr<PlanStage>> projStatus = applyProjection(txn,
+ StatusWith<unique_ptr<PlanStage>> projStatus = applyProjection(opCtx,
nsString,
cq.get(),
request->getProj(),
@@ -970,8 +973,8 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
}
// We must have a tree of stages in order to have a valid plan executor, but the query
- // solution may be null. Takes ownership of all args other than 'collection' and 'txn'
- return PlanExecutor::make(txn,
+ // solution may be null. Takes ownership of all args other than 'collection' and 'opCtx'
+ return PlanExecutor::make(opCtx,
std::move(ws),
std::move(root),
std::move(querySolution),
@@ -984,7 +987,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
// Group
//
-StatusWith<unique_ptr<PlanExecutor>> getExecutorGroup(OperationContext* txn,
+StatusWith<unique_ptr<PlanExecutor>> getExecutorGroup(OperationContext* opCtx,
Collection* collection,
const GroupRequest& request,
PlanExecutor::YieldPolicy yieldPolicy) {
@@ -999,10 +1002,10 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorGroup(OperationContext* txn,
// reporting machinery always assumes that the root stage for a group operation is a
// GroupStage, so in this case we put a GroupStage on top of an EOFStage.
unique_ptr<PlanStage> root =
- make_unique<GroupStage>(txn, request, ws.get(), new EOFStage(txn));
+ make_unique<GroupStage>(opCtx, request, ws.get(), new EOFStage(opCtx));
return PlanExecutor::make(
- txn, std::move(ws), std::move(root), request.ns.ns(), yieldPolicy);
+ opCtx, std::move(ws), std::move(root), request.ns.ns(), yieldPolicy);
}
const NamespaceString nss(request.ns);
@@ -1011,9 +1014,9 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorGroup(OperationContext* txn,
qr->setCollation(request.collation);
qr->setExplain(request.explain);
- const ExtensionsCallbackReal extensionsCallback(txn, &nss);
+ const ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx, std::move(qr), extensionsCallback);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
@@ -1021,7 +1024,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorGroup(OperationContext* txn,
const size_t defaultPlannerOptions = 0;
StatusWith<PrepareExecutionResult> executionResult = prepareExecution(
- txn, collection, ws.get(), std::move(canonicalQuery), defaultPlannerOptions);
+ opCtx, collection, ws.get(), std::move(canonicalQuery), defaultPlannerOptions);
if (!executionResult.isOK()) {
return executionResult.getStatus();
}
@@ -1031,10 +1034,10 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorGroup(OperationContext* txn,
invariant(root);
- root = make_unique<GroupStage>(txn, request, ws.get(), root.release());
+ root = make_unique<GroupStage>(opCtx, request, ws.get(), root.release());
// We must have a tree of stages in order to have a valid plan executor, but the query
// solution may be null. Takes ownership of all args other than 'collection'.
- return PlanExecutor::make(txn,
+ return PlanExecutor::make(opCtx,
std::move(ws),
std::move(root),
std::move(querySolution),
@@ -1227,7 +1230,7 @@ BSONObj getDistinctProjection(const std::string& field) {
} // namespace
-StatusWith<unique_ptr<PlanExecutor>> getExecutorCount(OperationContext* txn,
+StatusWith<unique_ptr<PlanExecutor>> getExecutorCount(OperationContext* opCtx,
Collection* collection,
const CountRequest& request,
bool explain,
@@ -1241,11 +1244,11 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorCount(OperationContext* txn,
qr->setExplain(explain);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn,
+ opCtx,
std::move(qr),
- collection
- ? static_cast<const ExtensionsCallback&>(ExtensionsCallbackReal(txn, &collection->ns()))
- : static_cast<const ExtensionsCallback&>(ExtensionsCallbackNoop()));
+ collection ? static_cast<const ExtensionsCallback&>(
+ ExtensionsCallbackReal(opCtx, &collection->ns()))
+ : static_cast<const ExtensionsCallback&>(ExtensionsCallbackNoop()));
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
@@ -1258,9 +1261,9 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorCount(OperationContext* txn,
const bool useRecordStoreCount = false;
CountStageParams params(request, useRecordStoreCount);
unique_ptr<PlanStage> root = make_unique<CountStage>(
- txn, collection, std::move(params), ws.get(), new EOFStage(txn));
+ opCtx, collection, std::move(params), ws.get(), new EOFStage(opCtx));
return PlanExecutor::make(
- txn, std::move(ws), std::move(root), request.getNs().ns(), yieldPolicy);
+ opCtx, std::move(ws), std::move(root), request.getNs().ns(), yieldPolicy);
}
// If the query is empty, then we can determine the count by just asking the collection
@@ -1275,14 +1278,14 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorCount(OperationContext* txn,
if (useRecordStoreCount) {
unique_ptr<PlanStage> root =
- make_unique<CountStage>(txn, collection, std::move(params), ws.get(), nullptr);
+ make_unique<CountStage>(opCtx, collection, std::move(params), ws.get(), nullptr);
return PlanExecutor::make(
- txn, std::move(ws), std::move(root), request.getNs().ns(), yieldPolicy);
+ opCtx, std::move(ws), std::move(root), request.getNs().ns(), yieldPolicy);
}
const size_t plannerOptions = QueryPlannerParams::IS_COUNT;
StatusWith<PrepareExecutionResult> executionResult =
- prepareExecution(txn, collection, ws.get(), std::move(cq), plannerOptions);
+ prepareExecution(opCtx, collection, ws.get(), std::move(cq), plannerOptions);
if (!executionResult.isOK()) {
return executionResult.getStatus();
}
@@ -1293,10 +1296,10 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorCount(OperationContext* txn,
invariant(root);
// Make a CountStage to be the new root.
- root = make_unique<CountStage>(txn, collection, std::move(params), ws.get(), root.release());
+ root = make_unique<CountStage>(opCtx, collection, std::move(params), ws.get(), root.release());
// We must have a tree of stages in order to have a valid plan executor, but the query
- // solution may be NULL. Takes ownership of all args other than 'collection' and 'txn'
- return PlanExecutor::make(txn,
+ // solution may be NULL. Takes ownership of all args other than 'collection' and 'opCtx'
+ return PlanExecutor::make(opCtx,
std::move(ws),
std::move(root),
std::move(querySolution),
@@ -1406,16 +1409,16 @@ bool turnIxscanIntoDistinctIxscan(QuerySolution* soln, const string& field) {
return true;
}
-StatusWith<unique_ptr<PlanExecutor>> getExecutorDistinct(OperationContext* txn,
+StatusWith<unique_ptr<PlanExecutor>> getExecutorDistinct(OperationContext* opCtx,
Collection* collection,
const std::string& ns,
ParsedDistinct* parsedDistinct,
PlanExecutor::YieldPolicy yieldPolicy) {
if (!collection) {
// Treat collections that do not exist as empty collections.
- return PlanExecutor::make(txn,
+ return PlanExecutor::make(opCtx,
make_unique<WorkingSet>(),
- make_unique<EOFStage>(txn),
+ make_unique<EOFStage>(opCtx),
parsedDistinct->releaseQuery(),
collection,
yieldPolicy);
@@ -1435,15 +1438,15 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDistinct(OperationContext* txn,
QueryPlannerParams plannerParams;
plannerParams.options = QueryPlannerParams::NO_TABLE_SCAN;
- IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(txn, false);
+ IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(opCtx, false);
while (ii.more()) {
const IndexDescriptor* desc = ii.next();
IndexCatalogEntry* ice = ii.catalogEntry(desc);
if (desc->keyPattern().hasField(parsedDistinct->getKey())) {
plannerParams.indices.push_back(IndexEntry(desc->keyPattern(),
desc->getAccessMethodName(),
- desc->isMultikey(txn),
- ice->getMultikeyPaths(txn),
+ desc->isMultikey(opCtx),
+ ice->getMultikeyPaths(opCtx),
desc->isSparse(),
desc->unique(),
desc->indexName(),
@@ -1453,12 +1456,12 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDistinct(OperationContext* txn,
}
}
- const ExtensionsCallbackReal extensionsCallback(txn, &collection->ns());
+ const ExtensionsCallbackReal extensionsCallback(opCtx, &collection->ns());
// If there are no suitable indices for the distinct hack bail out now into regular planning
// with no projection.
if (plannerParams.indices.empty()) {
- return getExecutor(txn, collection, parsedDistinct->releaseQuery(), yieldPolicy);
+ return getExecutor(opCtx, collection, parsedDistinct->releaseQuery(), yieldPolicy);
}
//
@@ -1473,7 +1476,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDistinct(OperationContext* txn,
auto qr = stdx::make_unique<QueryRequest>(parsedDistinct->getQuery()->getQueryRequest());
qr->setProj(projection);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx, std::move(qr), extensionsCallback);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
@@ -1518,13 +1521,13 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDistinct(OperationContext* txn,
unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
PlanStage* rawRoot;
- verify(StageBuilder::build(txn, collection, *cq, *soln, ws.get(), &rawRoot));
+ verify(StageBuilder::build(opCtx, collection, *cq, *soln, ws.get(), &rawRoot));
unique_ptr<PlanStage> root(rawRoot);
LOG(2) << "Using fast distinct: " << redact(cq->toStringShort())
<< ", planSummary: " << redact(Explain::getPlanSummary(root.get()));
- return PlanExecutor::make(txn,
+ return PlanExecutor::make(opCtx,
std::move(ws),
std::move(root),
std::move(soln),
@@ -1537,7 +1540,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDistinct(OperationContext* txn,
vector<QuerySolution*> solutions;
Status status = QueryPlanner::plan(*cq, plannerParams, &solutions);
if (!status.isOK()) {
- return getExecutor(txn, collection, std::move(cq), yieldPolicy);
+ return getExecutor(opCtx, collection, std::move(cq), yieldPolicy);
}
// We look for a solution that has an ixscan we can turn into a distinctixscan
@@ -1554,13 +1557,14 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDistinct(OperationContext* txn,
unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
unique_ptr<QuerySolution> currentSolution(solutions[i]);
PlanStage* rawRoot;
- verify(StageBuilder::build(txn, collection, *cq, *currentSolution, ws.get(), &rawRoot));
+ verify(
+ StageBuilder::build(opCtx, collection, *cq, *currentSolution, ws.get(), &rawRoot));
unique_ptr<PlanStage> root(rawRoot);
LOG(2) << "Using fast distinct: " << redact(cq->toStringShort())
<< ", planSummary: " << redact(Explain::getPlanSummary(root.get()));
- return PlanExecutor::make(txn,
+ return PlanExecutor::make(opCtx,
std::move(ws),
std::move(root),
std::move(currentSolution),
@@ -1577,7 +1581,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDistinct(OperationContext* txn,
delete solutions[i];
}
- return getExecutor(txn, collection, parsedDistinct->releaseQuery(), yieldPolicy);
+ return getExecutor(opCtx, collection, parsedDistinct->releaseQuery(), yieldPolicy);
}
} // namespace mongo
diff --git a/src/mongo/db/query/get_executor.h b/src/mongo/db/query/get_executor.h
index 48f7eefcf83..65708d135fd 100644
--- a/src/mongo/db/query/get_executor.h
+++ b/src/mongo/db/query/get_executor.h
@@ -58,7 +58,7 @@ void filterAllowedIndexEntries(const AllowedIndicesFilter& allowedIndicesFilter,
* Fill out the provided 'plannerParams' for the 'canonicalQuery' operating on the collection
* 'collection'. Exposed for testing.
*/
-void fillOutPlannerParams(OperationContext* txn,
+void fillOutPlannerParams(OperationContext* opCtx,
Collection* collection,
CanonicalQuery* canonicalQuery,
QueryPlannerParams* plannerParams);
@@ -72,7 +72,7 @@ void fillOutPlannerParams(OperationContext* txn,
* If the query cannot be executed, returns a Status indicating why.
*/
StatusWith<std::unique_ptr<PlanExecutor>> getExecutor(
- OperationContext* txn,
+ OperationContext* opCtx,
Collection* collection,
std::unique_ptr<CanonicalQuery> canonicalQuery,
PlanExecutor::YieldPolicy yieldPolicy,
@@ -87,7 +87,7 @@ StatusWith<std::unique_ptr<PlanExecutor>> getExecutor(
* If the query cannot be executed, returns a Status indicating why.
*/
StatusWith<std::unique_ptr<PlanExecutor>> getExecutorFind(
- OperationContext* txn,
+ OperationContext* opCtx,
Collection* collection,
const NamespaceString& nss,
std::unique_ptr<CanonicalQuery> canonicalQuery,
@@ -110,7 +110,7 @@ bool turnIxscanIntoDistinctIxscan(QuerySolution* soln, const std::string& field)
* body of method for detail).
*/
StatusWith<std::unique_ptr<PlanExecutor>> getExecutorDistinct(
- OperationContext* txn,
+ OperationContext* opCtx,
Collection* collection,
const std::string& ns,
ParsedDistinct* parsedDistinct,
@@ -123,7 +123,7 @@ StatusWith<std::unique_ptr<PlanExecutor>> getExecutorDistinct(
* As such, with certain covered queries, we can skip the overhead of fetching etc. when
* executing a count.
*/
-StatusWith<std::unique_ptr<PlanExecutor>> getExecutorCount(OperationContext* txn,
+StatusWith<std::unique_ptr<PlanExecutor>> getExecutorCount(OperationContext* opCtx,
Collection* collection,
const CountRequest& request,
bool explain,
@@ -145,7 +145,7 @@ StatusWith<std::unique_ptr<PlanExecutor>> getExecutorCount(OperationContext* txn
*
* If the query cannot be executed, returns a Status indicating why.
*/
-StatusWith<std::unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn,
+StatusWith<std::unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* opCtx,
OpDebug* opDebug,
Collection* collection,
ParsedDelete* parsedDelete);
@@ -167,7 +167,7 @@ StatusWith<std::unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* tx
*
* If the query cannot be executed, returns a Status indicating why.
*/
-StatusWith<std::unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
+StatusWith<std::unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* opCtx,
OpDebug* opDebug,
Collection* collection,
ParsedUpdate* parsedUpdate);
@@ -180,7 +180,7 @@ StatusWith<std::unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* tx
*
* If an executor could not be created, returns a Status indicating why.
*/
-StatusWith<std::unique_ptr<PlanExecutor>> getExecutorGroup(OperationContext* txn,
+StatusWith<std::unique_ptr<PlanExecutor>> getExecutorGroup(OperationContext* opCtx,
Collection* collection,
const GroupRequest& request,
PlanExecutor::YieldPolicy yieldPolicy);
diff --git a/src/mongo/db/query/get_executor_test.cpp b/src/mongo/db/query/get_executor_test.cpp
index a05c8e08e13..a1dc1f88648 100644
--- a/src/mongo/db/query/get_executor_test.cpp
+++ b/src/mongo/db/query/get_executor_test.cpp
@@ -59,14 +59,14 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
const char* sortStr,
const char* projStr) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson(queryStr));
qr->setSort(fromjson(sortStr));
qr->setProj(fromjson(projStr));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
diff --git a/src/mongo/db/query/internal_plans.cpp b/src/mongo/db/query/internal_plans.cpp
index 3889e82031b..ade228a0223 100644
--- a/src/mongo/db/query/internal_plans.cpp
+++ b/src/mongo/db/query/internal_plans.cpp
@@ -41,7 +41,7 @@
namespace mongo {
-std::unique_ptr<PlanExecutor> InternalPlanner::collectionScan(OperationContext* txn,
+std::unique_ptr<PlanExecutor> InternalPlanner::collectionScan(OperationContext* opCtx,
StringData ns,
Collection* collection,
PlanExecutor::YieldPolicy yieldPolicy,
@@ -50,27 +50,27 @@ std::unique_ptr<PlanExecutor> InternalPlanner::collectionScan(OperationContext*
std::unique_ptr<WorkingSet> ws = stdx::make_unique<WorkingSet>();
if (NULL == collection) {
- auto eof = stdx::make_unique<EOFStage>(txn);
+ auto eof = stdx::make_unique<EOFStage>(opCtx);
// Takes ownership of 'ws' and 'eof'.
auto statusWithPlanExecutor =
- PlanExecutor::make(txn, std::move(ws), std::move(eof), ns.toString(), yieldPolicy);
+ PlanExecutor::make(opCtx, std::move(ws), std::move(eof), ns.toString(), yieldPolicy);
invariant(statusWithPlanExecutor.isOK());
return std::move(statusWithPlanExecutor.getValue());
}
invariant(ns == collection->ns().ns());
- auto cs = _collectionScan(txn, ws.get(), collection, direction, startLoc);
+ auto cs = _collectionScan(opCtx, ws.get(), collection, direction, startLoc);
// Takes ownership of 'ws' and 'cs'.
auto statusWithPlanExecutor =
- PlanExecutor::make(txn, std::move(ws), std::move(cs), collection, yieldPolicy);
+ PlanExecutor::make(opCtx, std::move(ws), std::move(cs), collection, yieldPolicy);
invariant(statusWithPlanExecutor.isOK());
return std::move(statusWithPlanExecutor.getValue());
}
std::unique_ptr<PlanExecutor> InternalPlanner::deleteWithCollectionScan(
- OperationContext* txn,
+ OperationContext* opCtx,
Collection* collection,
const DeleteStageParams& params,
PlanExecutor::YieldPolicy yieldPolicy,
@@ -78,18 +78,18 @@ std::unique_ptr<PlanExecutor> InternalPlanner::deleteWithCollectionScan(
const RecordId& startLoc) {
auto ws = stdx::make_unique<WorkingSet>();
- auto root = _collectionScan(txn, ws.get(), collection, direction, startLoc);
+ auto root = _collectionScan(opCtx, ws.get(), collection, direction, startLoc);
- root = stdx::make_unique<DeleteStage>(txn, params, ws.get(), collection, root.release());
+ root = stdx::make_unique<DeleteStage>(opCtx, params, ws.get(), collection, root.release());
auto executor =
- PlanExecutor::make(txn, std::move(ws), std::move(root), collection, yieldPolicy);
+ PlanExecutor::make(opCtx, std::move(ws), std::move(root), collection, yieldPolicy);
invariantOK(executor.getStatus());
return std::move(executor.getValue());
}
-std::unique_ptr<PlanExecutor> InternalPlanner::indexScan(OperationContext* txn,
+std::unique_ptr<PlanExecutor> InternalPlanner::indexScan(OperationContext* opCtx,
const Collection* collection,
const IndexDescriptor* descriptor,
const BSONObj& startKey,
@@ -100,7 +100,7 @@ std::unique_ptr<PlanExecutor> InternalPlanner::indexScan(OperationContext* txn,
int options) {
auto ws = stdx::make_unique<WorkingSet>();
- std::unique_ptr<PlanStage> root = _indexScan(txn,
+ std::unique_ptr<PlanStage> root = _indexScan(opCtx,
ws.get(),
collection,
descriptor,
@@ -111,13 +111,13 @@ std::unique_ptr<PlanExecutor> InternalPlanner::indexScan(OperationContext* txn,
options);
auto executor =
- PlanExecutor::make(txn, std::move(ws), std::move(root), collection, yieldPolicy);
+ PlanExecutor::make(opCtx, std::move(ws), std::move(root), collection, yieldPolicy);
invariantOK(executor.getStatus());
return std::move(executor.getValue());
}
std::unique_ptr<PlanExecutor> InternalPlanner::deleteWithIndexScan(
- OperationContext* txn,
+ OperationContext* opCtx,
Collection* collection,
const DeleteStageParams& params,
const IndexDescriptor* descriptor,
@@ -128,7 +128,7 @@ std::unique_ptr<PlanExecutor> InternalPlanner::deleteWithIndexScan(
Direction direction) {
auto ws = stdx::make_unique<WorkingSet>();
- std::unique_ptr<PlanStage> root = _indexScan(txn,
+ std::unique_ptr<PlanStage> root = _indexScan(opCtx,
ws.get(),
collection,
descriptor,
@@ -138,15 +138,15 @@ std::unique_ptr<PlanExecutor> InternalPlanner::deleteWithIndexScan(
direction,
InternalPlanner::IXSCAN_FETCH);
- root = stdx::make_unique<DeleteStage>(txn, params, ws.get(), collection, root.release());
+ root = stdx::make_unique<DeleteStage>(opCtx, params, ws.get(), collection, root.release());
auto executor =
- PlanExecutor::make(txn, std::move(ws), std::move(root), collection, yieldPolicy);
+ PlanExecutor::make(opCtx, std::move(ws), std::move(root), collection, yieldPolicy);
invariantOK(executor.getStatus());
return std::move(executor.getValue());
}
-std::unique_ptr<PlanStage> InternalPlanner::_collectionScan(OperationContext* txn,
+std::unique_ptr<PlanStage> InternalPlanner::_collectionScan(OperationContext* opCtx,
WorkingSet* ws,
const Collection* collection,
Direction direction,
@@ -163,10 +163,10 @@ std::unique_ptr<PlanStage> InternalPlanner::_collectionScan(OperationContext* tx
params.direction = CollectionScanParams::BACKWARD;
}
- return stdx::make_unique<CollectionScan>(txn, params, ws, nullptr);
+ return stdx::make_unique<CollectionScan>(opCtx, params, ws, nullptr);
}
-std::unique_ptr<PlanStage> InternalPlanner::_indexScan(OperationContext* txn,
+std::unique_ptr<PlanStage> InternalPlanner::_indexScan(OperationContext* opCtx,
WorkingSet* ws,
const Collection* collection,
const IndexDescriptor* descriptor,
@@ -186,10 +186,10 @@ std::unique_ptr<PlanStage> InternalPlanner::_indexScan(OperationContext* txn,
params.bounds.endKey = endKey;
params.bounds.boundInclusion = boundInclusion;
- std::unique_ptr<PlanStage> root = stdx::make_unique<IndexScan>(txn, params, ws, nullptr);
+ std::unique_ptr<PlanStage> root = stdx::make_unique<IndexScan>(opCtx, params, ws, nullptr);
if (InternalPlanner::IXSCAN_FETCH & options) {
- root = stdx::make_unique<FetchStage>(txn, ws, root.release(), nullptr, collection);
+ root = stdx::make_unique<FetchStage>(opCtx, ws, root.release(), nullptr, collection);
}
return root;
diff --git a/src/mongo/db/query/internal_plans.h b/src/mongo/db/query/internal_plans.h
index 40b42f75df1..f9da8e89f70 100644
--- a/src/mongo/db/query/internal_plans.h
+++ b/src/mongo/db/query/internal_plans.h
@@ -66,7 +66,7 @@ public:
/**
* Returns a collection scan. Caller owns pointer.
*/
- static std::unique_ptr<PlanExecutor> collectionScan(OperationContext* txn,
+ static std::unique_ptr<PlanExecutor> collectionScan(OperationContext* opCtx,
StringData ns,
Collection* collection,
PlanExecutor::YieldPolicy yieldPolicy,
@@ -77,7 +77,7 @@ public:
* Returns a FETCH => DELETE plan.
*/
static std::unique_ptr<PlanExecutor> deleteWithCollectionScan(
- OperationContext* txn,
+ OperationContext* opCtx,
Collection* collection,
const DeleteStageParams& params,
PlanExecutor::YieldPolicy yieldPolicy,
@@ -87,7 +87,7 @@ public:
/**
* Returns an index scan. Caller owns returned pointer.
*/
- static std::unique_ptr<PlanExecutor> indexScan(OperationContext* txn,
+ static std::unique_ptr<PlanExecutor> indexScan(OperationContext* opCtx,
const Collection* collection,
const IndexDescriptor* descriptor,
const BSONObj& startKey,
@@ -100,7 +100,7 @@ public:
/**
* Returns an IXSCAN => FETCH => DELETE plan.
*/
- static std::unique_ptr<PlanExecutor> deleteWithIndexScan(OperationContext* txn,
+ static std::unique_ptr<PlanExecutor> deleteWithIndexScan(OperationContext* opCtx,
Collection* collection,
const DeleteStageParams& params,
const IndexDescriptor* descriptor,
@@ -116,7 +116,7 @@ private:
*
* Used as a helper for collectionScan() and deleteWithCollectionScan().
*/
- static std::unique_ptr<PlanStage> _collectionScan(OperationContext* txn,
+ static std::unique_ptr<PlanStage> _collectionScan(OperationContext* opCtx,
WorkingSet* ws,
const Collection* collection,
Direction direction,
@@ -127,7 +127,7 @@ private:
*
* Used as a helper for indexScan() and deleteWithIndexScan().
*/
- static std::unique_ptr<PlanStage> _indexScan(OperationContext* txn,
+ static std::unique_ptr<PlanStage> _indexScan(OperationContext* opCtx,
WorkingSet* ws,
const Collection* collection,
const IndexDescriptor* descriptor,
diff --git a/src/mongo/db/query/parsed_distinct.cpp b/src/mongo/db/query/parsed_distinct.cpp
index 02de55ed4e8..8bd80ff405e 100644
--- a/src/mongo/db/query/parsed_distinct.cpp
+++ b/src/mongo/db/query/parsed_distinct.cpp
@@ -97,7 +97,7 @@ StatusWith<BSONObj> ParsedDistinct::asAggregationCommand() const {
return aggregationBuilder.obj();
}
-StatusWith<ParsedDistinct> ParsedDistinct::parse(OperationContext* txn,
+StatusWith<ParsedDistinct> ParsedDistinct::parse(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& cmdObj,
const ExtensionsCallback& extensionsCallback,
@@ -142,7 +142,7 @@ StatusWith<ParsedDistinct> ParsedDistinct::parse(OperationContext* txn,
qr->setExplain(isExplain);
- auto cq = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
+ auto cq = CanonicalQuery::canonicalize(opCtx, std::move(qr), extensionsCallback);
if (!cq.isOK()) {
return cq.getStatus();
}
diff --git a/src/mongo/db/query/parsed_distinct.h b/src/mongo/db/query/parsed_distinct.h
index 8e07116809b..eb1c4e2b0ab 100644
--- a/src/mongo/db/query/parsed_distinct.h
+++ b/src/mongo/db/query/parsed_distinct.h
@@ -78,7 +78,7 @@ public:
* 'extensionsCallback' allows for additional mongod parsing. If called from mongos, an
* ExtensionsCallbackNoop object should be passed to skip this parsing.
*/
- static StatusWith<ParsedDistinct> parse(OperationContext* txn,
+ static StatusWith<ParsedDistinct> parse(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& cmdObj,
const ExtensionsCallback& extensionsCallback,
diff --git a/src/mongo/db/query/parsed_distinct_test.cpp b/src/mongo/db/query/parsed_distinct_test.cpp
index f543a13b95a..d53457e500e 100644
--- a/src/mongo/db/query/parsed_distinct_test.cpp
+++ b/src/mongo/db/query/parsed_distinct_test.cpp
@@ -46,9 +46,9 @@ static const bool isExplain = true;
TEST(ParsedDistinctTest, ConvertToAggregationNoQuery) {
QueryTestServiceContext serviceContext;
auto uniqueTxn = serviceContext.makeOperationContext();
- OperationContext* txn = uniqueTxn.get();
+ OperationContext* opCtx = uniqueTxn.get();
- auto pd = ParsedDistinct::parse(txn,
+ auto pd = ParsedDistinct::parse(opCtx,
testns,
fromjson("{distinct: 'testcoll', key: 'x'}"),
ExtensionsCallbackDisallowExtensions(),
@@ -82,9 +82,9 @@ TEST(ParsedDistinctTest, ConvertToAggregationNoQuery) {
TEST(ParsedDistinctTest, ConvertToAggregationWithQuery) {
QueryTestServiceContext serviceContext;
auto uniqueTxn = serviceContext.makeOperationContext();
- OperationContext* txn = uniqueTxn.get();
+ OperationContext* opCtx = uniqueTxn.get();
- auto pd = ParsedDistinct::parse(txn,
+ auto pd = ParsedDistinct::parse(opCtx,
testns,
fromjson("{distinct: 'testcoll', key: 'y', query: {z: 7}}"),
ExtensionsCallbackDisallowExtensions(),
@@ -119,9 +119,9 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithQuery) {
TEST(ParsedDistinctTest, ConvertToAggregationWithExplain) {
QueryTestServiceContext serviceContext;
auto uniqueTxn = serviceContext.makeOperationContext();
- OperationContext* txn = uniqueTxn.get();
+ OperationContext* opCtx = uniqueTxn.get();
- auto pd = ParsedDistinct::parse(txn,
+ auto pd = ParsedDistinct::parse(opCtx,
testns,
fromjson("{distinct: 'testcoll', key: 'x'}"),
ExtensionsCallbackDisallowExtensions(),
diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp
index 752afe99fa2..5672f6cc1af 100644
--- a/src/mongo/db/query/plan_cache_test.cpp
+++ b/src/mongo/db/query/plan_cache_test.cpp
@@ -67,12 +67,12 @@ static const NamespaceString nss("test.collection");
*/
unique_ptr<CanonicalQuery> canonicalize(const BSONObj& queryObj) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(queryObj);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
@@ -87,7 +87,7 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
const char* projStr,
const char* collationStr) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson(queryStr));
@@ -95,7 +95,7 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
qr->setProj(fromjson(projStr));
qr->setCollation(fromjson(collationStr));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
@@ -109,7 +109,7 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
const char* minStr,
const char* maxStr) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson(queryStr));
@@ -125,7 +125,7 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
qr->setMin(fromjson(minStr));
qr->setMax(fromjson(maxStr));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
@@ -141,7 +141,7 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
bool snapshot,
bool explain) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson(queryStr));
@@ -159,7 +159,7 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
qr->setSnapshot(snapshot);
qr->setExplain(explain);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
@@ -539,7 +539,7 @@ protected:
const BSONObj& maxObj,
bool snapshot) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Clean up any previous state from a call to runQueryFull or runQueryAsCommand.
for (vector<QuerySolution*>::iterator it = solns.begin(); it != solns.end(); ++it) {
@@ -563,7 +563,7 @@ protected:
qr->setMax(maxObj);
qr->setSnapshot(snapshot);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
Status s = QueryPlanner::plan(*statusWithCQ.getValue(), params, &solns);
ASSERT_OK(s);
@@ -571,7 +571,7 @@ protected:
void runQueryAsCommand(const BSONObj& cmdObj) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
// Clean up any previous state from a call to runQueryFull or runQueryAsCommand.
for (vector<QuerySolution*>::iterator it = solns.begin(); it != solns.end(); ++it) {
@@ -585,7 +585,7 @@ protected:
assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
Status s = QueryPlanner::plan(*statusWithCQ.getValue(), params, &solns);
ASSERT_OK(s);
@@ -658,7 +658,7 @@ protected:
const BSONObj& collation,
const QuerySolution& soln) const {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(query);
@@ -666,7 +666,7 @@ protected:
qr->setProj(proj);
qr->setCollation(collation);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> scopedCq = std::move(statusWithCQ.getValue());
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index d162e998bab..dd3f66164f4 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -134,7 +134,7 @@ StatusWith<unique_ptr<PlanExecutor>> PlanExecutor::make(OperationContext* opCtx,
}
// static
-StatusWith<unique_ptr<PlanExecutor>> PlanExecutor::make(OperationContext* txn,
+StatusWith<unique_ptr<PlanExecutor>> PlanExecutor::make(OperationContext* opCtx,
unique_ptr<WorkingSet> ws,
unique_ptr<PlanStage> rt,
unique_ptr<QuerySolution> qs,
@@ -143,7 +143,7 @@ StatusWith<unique_ptr<PlanExecutor>> PlanExecutor::make(OperationContext* txn,
const string& ns,
YieldPolicy yieldPolicy) {
unique_ptr<PlanExecutor> exec(new PlanExecutor(
- txn, std::move(ws), std::move(rt), std::move(qs), std::move(cq), collection, ns));
+ opCtx, std::move(ws), std::move(rt), std::move(qs), std::move(cq), collection, ns));
// Perform plan selection, if necessary.
Status status = exec->pickBestPlan(yieldPolicy, collection);
@@ -322,21 +322,21 @@ void PlanExecutor::detachFromOperationContext() {
_everDetachedFromOperationContext = true;
}
-void PlanExecutor::reattachToOperationContext(OperationContext* txn) {
+void PlanExecutor::reattachToOperationContext(OperationContext* opCtx) {
invariant(_currentState == kDetached);
// We're reattaching for a getMore now. Reset the yield timer in order to prevent from
// yielding again right away.
_yieldPolicy->resetTimer();
- _opCtx = txn;
- _root->reattachToOperationContext(txn);
+ _opCtx = opCtx;
+ _root->reattachToOperationContext(opCtx);
_currentState = kSaved;
}
-void PlanExecutor::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+void PlanExecutor::invalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {
if (!killed()) {
- _root->invalidate(txn, dl, type);
+ _root->invalidate(opCtx, dl, type);
}
}
diff --git a/src/mongo/db/query/plan_executor.h b/src/mongo/db/query/plan_executor.h
index 207fc6765c2..1e97963f67b 100644
--- a/src/mongo/db/query/plan_executor.h
+++ b/src/mongo/db/query/plan_executor.h
@@ -336,7 +336,7 @@ public:
* state. As such, if the plan yields, it must be notified of relevant writes so that
* we can ensure that it doesn't crash if we try to access invalid state.
*/
- void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ void invalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type);
/**
* Helper method to aid in displaying an ExecState for debug or other recreational purposes.
@@ -413,7 +413,7 @@ private:
/**
* Public factory methods delegate to this private factory to do their work.
*/
- static StatusWith<std::unique_ptr<PlanExecutor>> make(OperationContext* txn,
+ static StatusWith<std::unique_ptr<PlanExecutor>> make(OperationContext* opCtx,
std::unique_ptr<WorkingSet> ws,
std::unique_ptr<PlanStage> rt,
std::unique_ptr<QuerySolution> qs,
diff --git a/src/mongo/db/query/query_planner_test.cpp b/src/mongo/db/query/query_planner_test.cpp
index de3607bf0af..022ce5f533b 100644
--- a/src/mongo/db/query/query_planner_test.cpp
+++ b/src/mongo/db/query/query_planner_test.cpp
@@ -4249,8 +4249,8 @@ TEST_F(QueryPlannerTest, CacheDataFromTaggedTreeFailsOnBadInput) {
auto qr = stdx::make_unique<QueryRequest>(NamespaceString("test.collection"));
qr->setFilter(BSON("a" << 3));
- auto statusWithCQ =
- CanonicalQuery::canonicalize(txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ auto statusWithCQ = CanonicalQuery::canonicalize(
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> scopedCq = std::move(statusWithCQ.getValue());
scopedCq->root()->setTag(new IndexTag(1));
@@ -4265,8 +4265,8 @@ TEST_F(QueryPlannerTest, TagAccordingToCacheFailsOnBadInput) {
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(BSON("a" << 3));
- auto statusWithCQ =
- CanonicalQuery::canonicalize(txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ auto statusWithCQ = CanonicalQuery::canonicalize(
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> scopedCq = std::move(statusWithCQ.getValue());
@@ -4296,7 +4296,7 @@ TEST_F(QueryPlannerTest, TagAccordingToCacheFailsOnBadInput) {
auto newQR = stdx::make_unique<QueryRequest>(nss);
newQR->setFilter(BSON("a" << 3));
statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(newQR), ExtensionsCallbackDisallowExtensions());
+ opCtx(), std::move(newQR), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
scopedCq = std::move(statusWithCQ.getValue());
diff --git a/src/mongo/db/query/query_planner_test_fixture.cpp b/src/mongo/db/query/query_planner_test_fixture.cpp
index bb97ce06bde..4e1a051b8a0 100644
--- a/src/mongo/db/query/query_planner_test_fixture.cpp
+++ b/src/mongo/db/query/query_planner_test_fixture.cpp
@@ -56,7 +56,7 @@ void QueryPlannerTest::setUp() {
addIndex(BSON("_id" << 1));
}
-OperationContext* QueryPlannerTest::txn() {
+OperationContext* QueryPlannerTest::opCtx() {
return opCtx.get();
}
@@ -253,7 +253,7 @@ void QueryPlannerTest::runQueryFull(const BSONObj& query,
qr->setMax(maxObj);
qr->setSnapshot(snapshot);
auto statusWithCQ =
- CanonicalQuery::canonicalize(txn(), std::move(qr), ExtensionsCallbackNoop());
+ CanonicalQuery::canonicalize(opCtx(), std::move(qr), ExtensionsCallbackNoop());
ASSERT_OK(statusWithCQ.getStatus());
cq = std::move(statusWithCQ.getValue());
@@ -330,7 +330,7 @@ void QueryPlannerTest::runInvalidQueryFull(const BSONObj& query,
qr->setMax(maxObj);
qr->setSnapshot(snapshot);
auto statusWithCQ =
- CanonicalQuery::canonicalize(txn(), std::move(qr), ExtensionsCallbackNoop());
+ CanonicalQuery::canonicalize(opCtx(), std::move(qr), ExtensionsCallbackNoop());
ASSERT_OK(statusWithCQ.getStatus());
cq = std::move(statusWithCQ.getValue());
@@ -349,7 +349,7 @@ void QueryPlannerTest::runQueryAsCommand(const BSONObj& cmdObj) {
assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
auto statusWithCQ =
- CanonicalQuery::canonicalize(txn(), std::move(qr), ExtensionsCallbackNoop());
+ CanonicalQuery::canonicalize(opCtx(), std::move(qr), ExtensionsCallbackNoop());
ASSERT_OK(statusWithCQ.getStatus());
cq = std::move(statusWithCQ.getValue());
@@ -368,7 +368,7 @@ void QueryPlannerTest::runInvalidQueryAsCommand(const BSONObj& cmdObj) {
assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
auto statusWithCQ =
- CanonicalQuery::canonicalize(txn(), std::move(qr), ExtensionsCallbackNoop());
+ CanonicalQuery::canonicalize(opCtx(), std::move(qr), ExtensionsCallbackNoop());
ASSERT_OK(statusWithCQ.getStatus());
cq = std::move(statusWithCQ.getValue());
diff --git a/src/mongo/db/query/query_planner_test_fixture.h b/src/mongo/db/query/query_planner_test_fixture.h
index 42738876785..18a586bea61 100644
--- a/src/mongo/db/query/query_planner_test_fixture.h
+++ b/src/mongo/db/query/query_planner_test_fixture.h
@@ -48,7 +48,7 @@ class QueryPlannerTest : public mongo::unittest::Test {
protected:
void setUp();
- OperationContext* txn();
+ OperationContext* opCtx();
//
// Build up test.
diff --git a/src/mongo/db/query/query_yield.cpp b/src/mongo/db/query/query_yield.cpp
index 6548a662a17..5f43cf819ec 100644
--- a/src/mongo/db/query/query_yield.cpp
+++ b/src/mongo/db/query/query_yield.cpp
@@ -45,7 +45,7 @@ MONGO_FP_DECLARE(setYieldAllLocksWait);
} // namespace
// static
-void QueryYield::yieldAllLocks(OperationContext* txn,
+void QueryYield::yieldAllLocks(OperationContext* opCtx,
RecordFetcher* fetcher,
const std::string& planExecNS) {
// Things have to happen here in a specific order:
@@ -55,12 +55,12 @@ void QueryYield::yieldAllLocks(OperationContext* txn,
// 4) Touch the record we're yielding on, if there is one (RecordFetcher::fetch)
// 5) Reacquire lock mgr locks
- Locker* locker = txn->lockState();
+ Locker* locker = opCtx->lockState();
Locker::LockSnapshot snapshot;
if (fetcher) {
- fetcher->setup(txn);
+ fetcher->setup(opCtx);
}
// Nothing was unlocked, just return, yielding is pointless.
@@ -70,7 +70,7 @@ void QueryYield::yieldAllLocks(OperationContext* txn,
// Top-level locks are freed, release any potential low-level (storage engine-specific
// locks). If we are yielding, we are at a safe place to do so.
- txn->recoveryUnit()->abandonSnapshot();
+ opCtx->recoveryUnit()->abandonSnapshot();
MONGO_FAIL_POINT_PAUSE_WHILE_SET(setYieldAllLocksHang);
@@ -83,7 +83,7 @@ void QueryYield::yieldAllLocks(OperationContext* txn,
}
// Track the number of yields in CurOp.
- CurOp::get(txn)->yielded();
+ CurOp::get(opCtx)->yielded();
if (fetcher) {
fetcher->fetch();
diff --git a/src/mongo/db/query/query_yield.h b/src/mongo/db/query/query_yield.h
index 796bf6e797c..a42e29800c9 100644
--- a/src/mongo/db/query/query_yield.h
+++ b/src/mongo/db/query/query_yield.h
@@ -48,7 +48,7 @@ public:
*
* If in a nested context (eg DBDirectClient), does nothing.
*/
- static void yieldAllLocks(OperationContext* txn,
+ static void yieldAllLocks(OperationContext* opCtx,
RecordFetcher* fetcher,
const std::string& planExecNS);
};
diff --git a/src/mongo/db/query/stage_builder.cpp b/src/mongo/db/query/stage_builder.cpp
index e790325a439..dc07281f5da 100644
--- a/src/mongo/db/query/stage_builder.cpp
+++ b/src/mongo/db/query/stage_builder.cpp
@@ -65,7 +65,7 @@ namespace mongo {
using std::unique_ptr;
using stdx::make_unique;
-PlanStage* buildStages(OperationContext* txn,
+PlanStage* buildStages(OperationContext* opCtx,
Collection* collection,
const CanonicalQuery& cq,
const QuerySolution& qsol,
@@ -79,7 +79,7 @@ PlanStage* buildStages(OperationContext* txn,
params.direction =
(csn->direction == 1) ? CollectionScanParams::FORWARD : CollectionScanParams::BACKWARD;
params.maxScan = csn->maxScan;
- return new CollectionScan(txn, params, ws, csn->filter.get());
+ return new CollectionScan(opCtx, params, ws, csn->filter.get());
} else if (STAGE_IXSCAN == root->getType()) {
const IndexScanNode* ixn = static_cast<const IndexScanNode*>(root);
@@ -90,24 +90,24 @@ PlanStage* buildStages(OperationContext* txn,
IndexScanParams params;
- params.descriptor = collection->getIndexCatalog()->findIndexByName(txn, ixn->index.name);
+ params.descriptor = collection->getIndexCatalog()->findIndexByName(opCtx, ixn->index.name);
invariant(params.descriptor);
params.bounds = ixn->bounds;
params.direction = ixn->direction;
params.maxScan = ixn->maxScan;
params.addKeyMetadata = ixn->addKeyMetadata;
- return new IndexScan(txn, params, ws, ixn->filter.get());
+ return new IndexScan(opCtx, params, ws, ixn->filter.get());
} else if (STAGE_FETCH == root->getType()) {
const FetchNode* fn = static_cast<const FetchNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, fn->children[0], ws);
+ PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, fn->children[0], ws);
if (NULL == childStage) {
return NULL;
}
- return new FetchStage(txn, ws, childStage, fn->filter.get(), collection);
+ return new FetchStage(opCtx, ws, childStage, fn->filter.get(), collection);
} else if (STAGE_SORT == root->getType()) {
const SortNode* sn = static_cast<const SortNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, sn->children[0], ws);
+ PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, sn->children[0], ws);
if (NULL == childStage) {
return NULL;
}
@@ -115,23 +115,24 @@ PlanStage* buildStages(OperationContext* txn,
params.collection = collection;
params.pattern = sn->pattern;
params.limit = sn->limit;
- return new SortStage(txn, params, ws, childStage);
+ return new SortStage(opCtx, params, ws, childStage);
} else if (STAGE_SORT_KEY_GENERATOR == root->getType()) {
const SortKeyGeneratorNode* keyGenNode = static_cast<const SortKeyGeneratorNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, keyGenNode->children[0], ws);
+ PlanStage* childStage =
+ buildStages(opCtx, collection, cq, qsol, keyGenNode->children[0], ws);
if (NULL == childStage) {
return NULL;
}
return new SortKeyGeneratorStage(
- txn, childStage, ws, keyGenNode->sortSpec, keyGenNode->queryObj, cq.getCollator());
+ opCtx, childStage, ws, keyGenNode->sortSpec, keyGenNode->queryObj, cq.getCollator());
} else if (STAGE_PROJECTION == root->getType()) {
const ProjectionNode* pn = static_cast<const ProjectionNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, pn->children[0], ws);
+ PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, pn->children[0], ws);
if (NULL == childStage) {
return NULL;
}
- ProjectionStageParams params(ExtensionsCallbackReal(txn, &collection->ns()));
+ ProjectionStageParams params(ExtensionsCallbackReal(opCtx, &collection->ns()));
params.projObj = pn->projection;
params.collator = cq.getCollator();
@@ -148,26 +149,26 @@ PlanStage* buildStages(OperationContext* txn,
params.projImpl = ProjectionStageParams::SIMPLE_DOC;
}
- return new ProjectionStage(txn, params, ws, childStage);
+ return new ProjectionStage(opCtx, params, ws, childStage);
} else if (STAGE_LIMIT == root->getType()) {
const LimitNode* ln = static_cast<const LimitNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, ln->children[0], ws);
+ PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, ln->children[0], ws);
if (NULL == childStage) {
return NULL;
}
- return new LimitStage(txn, ln->limit, ws, childStage);
+ return new LimitStage(opCtx, ln->limit, ws, childStage);
} else if (STAGE_SKIP == root->getType()) {
const SkipNode* sn = static_cast<const SkipNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, sn->children[0], ws);
+ PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, sn->children[0], ws);
if (NULL == childStage) {
return NULL;
}
- return new SkipStage(txn, sn->skip, ws, childStage);
+ return new SkipStage(opCtx, sn->skip, ws, childStage);
} else if (STAGE_AND_HASH == root->getType()) {
const AndHashNode* ahn = static_cast<const AndHashNode*>(root);
- auto ret = make_unique<AndHashStage>(txn, ws, collection);
+ auto ret = make_unique<AndHashStage>(opCtx, ws, collection);
for (size_t i = 0; i < ahn->children.size(); ++i) {
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, ahn->children[i], ws);
+ PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, ahn->children[i], ws);
if (NULL == childStage) {
return NULL;
}
@@ -176,9 +177,9 @@ PlanStage* buildStages(OperationContext* txn,
return ret.release();
} else if (STAGE_OR == root->getType()) {
const OrNode* orn = static_cast<const OrNode*>(root);
- auto ret = make_unique<OrStage>(txn, ws, orn->dedup, orn->filter.get());
+ auto ret = make_unique<OrStage>(opCtx, ws, orn->dedup, orn->filter.get());
for (size_t i = 0; i < orn->children.size(); ++i) {
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, orn->children[i], ws);
+ PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, orn->children[i], ws);
if (NULL == childStage) {
return NULL;
}
@@ -187,9 +188,9 @@ PlanStage* buildStages(OperationContext* txn,
return ret.release();
} else if (STAGE_AND_SORTED == root->getType()) {
const AndSortedNode* asn = static_cast<const AndSortedNode*>(root);
- auto ret = make_unique<AndSortedStage>(txn, ws, collection);
+ auto ret = make_unique<AndSortedStage>(opCtx, ws, collection);
for (size_t i = 0; i < asn->children.size(); ++i) {
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, asn->children[i], ws);
+ PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, asn->children[i], ws);
if (NULL == childStage) {
return NULL;
}
@@ -202,9 +203,9 @@ PlanStage* buildStages(OperationContext* txn,
params.dedup = msn->dedup;
params.pattern = msn->sort;
params.collator = cq.getCollator();
- auto ret = make_unique<MergeSortStage>(txn, params, ws, collection);
+ auto ret = make_unique<MergeSortStage>(opCtx, params, ws, collection);
for (size_t i = 0; i < msn->children.size(); ++i) {
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, msn->children[i], ws);
+ PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, msn->children[i], ws);
if (NULL == childStage) {
return NULL;
}
@@ -222,10 +223,10 @@ PlanStage* buildStages(OperationContext* txn,
params.addDistMeta = node->addDistMeta;
IndexDescriptor* twoDIndex =
- collection->getIndexCatalog()->findIndexByName(txn, node->index.name);
+ collection->getIndexCatalog()->findIndexByName(opCtx, node->index.name);
invariant(twoDIndex);
- GeoNear2DStage* nearStage = new GeoNear2DStage(params, txn, ws, collection, twoDIndex);
+ GeoNear2DStage* nearStage = new GeoNear2DStage(params, opCtx, ws, collection, twoDIndex);
return nearStage;
} else if (STAGE_GEO_NEAR_2DSPHERE == root->getType()) {
@@ -239,14 +240,14 @@ PlanStage* buildStages(OperationContext* txn,
params.addDistMeta = node->addDistMeta;
IndexDescriptor* s2Index =
- collection->getIndexCatalog()->findIndexByName(txn, node->index.name);
+ collection->getIndexCatalog()->findIndexByName(opCtx, node->index.name);
invariant(s2Index);
- return new GeoNear2DSphereStage(params, txn, ws, collection, s2Index);
+ return new GeoNear2DSphereStage(params, opCtx, ws, collection, s2Index);
} else if (STAGE_TEXT == root->getType()) {
const TextNode* node = static_cast<const TextNode*>(root);
IndexDescriptor* desc =
- collection->getIndexCatalog()->findIndexByName(txn, node->index.name);
+ collection->getIndexCatalog()->findIndexByName(opCtx, node->index.name);
invariant(desc);
const FTSAccessMethod* fam =
static_cast<FTSAccessMethod*>(collection->getIndexCatalog()->getIndex(desc));
@@ -260,25 +261,25 @@ PlanStage* buildStages(OperationContext* txn,
// planning a query that contains "no-op" expressions. TODO: make StageBuilder::build()
// fail in this case (this improvement is being tracked by SERVER-21510).
params.query = static_cast<FTSQueryImpl&>(*node->ftsQuery);
- return new TextStage(txn, params, ws, node->filter.get());
+ return new TextStage(opCtx, params, ws, node->filter.get());
} else if (STAGE_SHARDING_FILTER == root->getType()) {
const ShardingFilterNode* fn = static_cast<const ShardingFilterNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, fn->children[0], ws);
+ PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, fn->children[0], ws);
if (NULL == childStage) {
return NULL;
}
return new ShardFilterStage(
- txn,
- CollectionShardingState::get(txn, collection->ns())->getMetadata(),
+ opCtx,
+ CollectionShardingState::get(opCtx, collection->ns())->getMetadata(),
ws,
childStage);
} else if (STAGE_KEEP_MUTATIONS == root->getType()) {
const KeepMutationsNode* km = static_cast<const KeepMutationsNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, km->children[0], ws);
+ PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, km->children[0], ws);
if (NULL == childStage) {
return NULL;
}
- return new KeepMutationsStage(txn, km->filter.get(), ws, childStage);
+ return new KeepMutationsStage(opCtx, km->filter.get(), ws, childStage);
} else if (STAGE_DISTINCT_SCAN == root->getType()) {
const DistinctNode* dn = static_cast<const DistinctNode*>(root);
@@ -289,12 +290,12 @@ PlanStage* buildStages(OperationContext* txn,
DistinctParams params;
- params.descriptor = collection->getIndexCatalog()->findIndexByName(txn, dn->index.name);
+ params.descriptor = collection->getIndexCatalog()->findIndexByName(opCtx, dn->index.name);
invariant(params.descriptor);
params.direction = dn->direction;
params.bounds = dn->bounds;
params.fieldNo = dn->fieldNo;
- return new DistinctScan(txn, params, ws);
+ return new DistinctScan(opCtx, params, ws);
} else if (STAGE_COUNT_SCAN == root->getType()) {
const CountScanNode* csn = static_cast<const CountScanNode*>(root);
@@ -305,21 +306,21 @@ PlanStage* buildStages(OperationContext* txn,
CountScanParams params;
- params.descriptor = collection->getIndexCatalog()->findIndexByName(txn, csn->index.name);
+ params.descriptor = collection->getIndexCatalog()->findIndexByName(opCtx, csn->index.name);
invariant(params.descriptor);
params.startKey = csn->startKey;
params.startKeyInclusive = csn->startKeyInclusive;
params.endKey = csn->endKey;
params.endKeyInclusive = csn->endKeyInclusive;
- return new CountScan(txn, params, ws);
+ return new CountScan(opCtx, params, ws);
} else if (STAGE_ENSURE_SORTED == root->getType()) {
const EnsureSortedNode* esn = static_cast<const EnsureSortedNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, cq, qsol, esn->children[0], ws);
+ PlanStage* childStage = buildStages(opCtx, collection, cq, qsol, esn->children[0], ws);
if (NULL == childStage) {
return NULL;
}
- return new EnsureSortedStage(txn, esn->pattern, ws, childStage);
+ return new EnsureSortedStage(opCtx, esn->pattern, ws, childStage);
} else {
mongoutils::str::stream ss;
root->appendToString(&ss, 0);
@@ -330,7 +331,7 @@ PlanStage* buildStages(OperationContext* txn,
}
// static (this one is used for Cached and MultiPlanStage)
-bool StageBuilder::build(OperationContext* txn,
+bool StageBuilder::build(OperationContext* opCtx,
Collection* collection,
const CanonicalQuery& cq,
const QuerySolution& solution,
@@ -349,7 +350,7 @@ bool StageBuilder::build(OperationContext* txn,
if (NULL == solutionNode) {
return false;
}
- return NULL != (*rootOut = buildStages(txn, collection, cq, solution, solutionNode, wsIn));
+ return NULL != (*rootOut = buildStages(opCtx, collection, cq, solution, solutionNode, wsIn));
}
} // namespace mongo
diff --git a/src/mongo/db/query/stage_builder.h b/src/mongo/db/query/stage_builder.h
index ee5ae17bb75..14ab05ab211 100644
--- a/src/mongo/db/query/stage_builder.h
+++ b/src/mongo/db/query/stage_builder.h
@@ -51,7 +51,7 @@ public:
*
* Returns false otherwise. *rootOut and *wsOut are invalid.
*/
- static bool build(OperationContext* txn,
+ static bool build(OperationContext* opCtx,
Collection* collection,
const CanonicalQuery& cq,
const QuerySolution& solution,
diff --git a/src/mongo/db/range_deleter.cpp b/src/mongo/db/range_deleter.cpp
index dc6e48e038d..e3a737600d7 100644
--- a/src/mongo/db/range_deleter.cpp
+++ b/src/mongo/db/range_deleter.cpp
@@ -201,7 +201,7 @@ void RangeDeleter::stopWorkers() {
}
}
-bool RangeDeleter::queueDelete(OperationContext* txn,
+bool RangeDeleter::queueDelete(OperationContext* opCtx,
const RangeDeleterOptions& options,
Notification<void>* doneSignal,
std::string* errMsg) {
@@ -231,7 +231,7 @@ bool RangeDeleter::queueDelete(OperationContext* txn,
}
if (options.waitForOpenCursors) {
- _env->getCursorIds(txn, ns, &toDelete->cursorsToWait);
+ _env->getCursorIds(opCtx, ns, &toDelete->cursorsToWait);
}
toDelete->stats.queueStartTS = jsTime();
@@ -257,12 +257,12 @@ bool RangeDeleter::queueDelete(OperationContext* txn,
namespace {
const int kWTimeoutMillis = 60 * 60 * 1000;
-bool _waitForMajority(OperationContext* txn, std::string* errMsg) {
+bool _waitForMajority(OperationContext* opCtx, std::string* errMsg) {
const WriteConcernOptions writeConcern(
WriteConcernOptions::kMajority, WriteConcernOptions::SyncMode::UNSET, kWTimeoutMillis);
repl::ReplicationCoordinator::StatusAndDuration replStatus =
- repl::getGlobalReplicationCoordinator()->awaitReplicationOfLastOpForClient(txn,
+ repl::getGlobalReplicationCoordinator()->awaitReplicationOfLastOpForClient(opCtx,
writeConcern);
if (!replStatus.status.isOK()) {
*errMsg = str::stream() << "rangeDeleter failed while waiting for replication after "
@@ -275,7 +275,7 @@ bool _waitForMajority(OperationContext* txn, std::string* errMsg) {
}
} // namespace
-bool RangeDeleter::deleteNow(OperationContext* txn,
+bool RangeDeleter::deleteNow(OperationContext* opCtx,
const RangeDeleterOptions& options,
string* errMsg) {
if (stopRequested()) {
@@ -308,7 +308,7 @@ bool RangeDeleter::deleteNow(OperationContext* txn,
RangeDeleteEntry taskDetails(options);
if (options.waitForOpenCursors) {
- _env->getCursorIds(txn, ns, &taskDetails.cursorsToWait);
+ _env->getCursorIds(opCtx, ns, &taskDetails.cursorsToWait);
}
long long checkIntervalMillis = 5;
@@ -320,7 +320,7 @@ bool RangeDeleter::deleteNow(OperationContext* txn,
logCursorsWaiting(&taskDetails);
set<CursorId> cursorsNow;
- _env->getCursorIds(txn, ns, &cursorsNow);
+ _env->getCursorIds(opCtx, ns, &cursorsNow);
set<CursorId> cursorsLeft;
std::set_intersection(taskDetails.cursorsToWait.begin(),
@@ -353,13 +353,13 @@ bool RangeDeleter::deleteNow(OperationContext* txn,
taskDetails.stats.queueEndTS = jsTime();
taskDetails.stats.deleteStartTS = jsTime();
- bool result = _env->deleteRange(txn, taskDetails, &taskDetails.stats.deletedDocCount, errMsg);
+ bool result = _env->deleteRange(opCtx, taskDetails, &taskDetails.stats.deletedDocCount, errMsg);
taskDetails.stats.deleteEndTS = jsTime();
if (result) {
taskDetails.stats.waitForReplStartTS = jsTime();
- result = _waitForMajority(txn, errMsg);
+ result = _waitForMajority(opCtx, errMsg);
taskDetails.stats.waitForReplEndTS = jsTime();
}
@@ -441,8 +441,8 @@ void RangeDeleter::doWork() {
set<CursorId> cursorsNow;
if (entry->options.waitForOpenCursors) {
- auto txn = client->makeOperationContext();
- _env->getCursorIds(txn.get(), entry->options.range.ns, &cursorsNow);
+ auto opCtx = client->makeOperationContext();
+ _env->getCursorIds(opCtx.get(), entry->options.range.ns, &cursorsNow);
}
set<CursorId> cursorsLeft;
@@ -479,16 +479,16 @@ void RangeDeleter::doWork() {
}
{
- auto txn = client->makeOperationContext();
+ auto opCtx = client->makeOperationContext();
nextTask->stats.deleteStartTS = jsTime();
- bool delResult =
- _env->deleteRange(txn.get(), *nextTask, &nextTask->stats.deletedDocCount, &errMsg);
+ bool delResult = _env->deleteRange(
+ opCtx.get(), *nextTask, &nextTask->stats.deletedDocCount, &errMsg);
nextTask->stats.deleteEndTS = jsTime();
if (delResult) {
nextTask->stats.waitForReplStartTS = jsTime();
- if (!_waitForMajority(txn.get(), &errMsg)) {
+ if (!_waitForMajority(opCtx.get(), &errMsg)) {
warning() << "Error encountered while waiting for replication: " << errMsg;
}
diff --git a/src/mongo/db/range_deleter.h b/src/mongo/db/range_deleter.h
index 776f8a825fc..84852b4960f 100644
--- a/src/mongo/db/range_deleter.h
+++ b/src/mongo/db/range_deleter.h
@@ -136,7 +136,7 @@ public:
* Returns true if the task is queued and false If the given range is blacklisted,
* is already queued, or stopWorkers() was called.
*/
- bool queueDelete(OperationContext* txn,
+ bool queueDelete(OperationContext* opCtx,
const RangeDeleterOptions& options,
Notification<void>* doneSignal,
std::string* errMsg);
@@ -148,7 +148,9 @@ public:
* Returns true if the deletion was performed. False if the range is blacklisted,
* was already queued, or stopWorkers() was called.
*/
- bool deleteNow(OperationContext* txn, const RangeDeleterOptions& options, std::string* errMsg);
+ bool deleteNow(OperationContext* opCtx,
+ const RangeDeleterOptions& options,
+ std::string* errMsg);
//
// Introspection methods
@@ -311,7 +313,7 @@ struct RangeDeleterEnv {
* Must be a synchronous call. Docs should be deleted after call ends.
* Must not throw Exceptions.
*/
- virtual bool deleteRange(OperationContext* txn,
+ virtual bool deleteRange(OperationContext* opCtx,
const RangeDeleteEntry& taskDetails,
long long int* deletedDocs,
std::string* errMsg) = 0;
@@ -324,7 +326,7 @@ struct RangeDeleterEnv {
* Must be a synchronous call. CursorIds should be populated after call.
* Must not throw exception.
*/
- virtual void getCursorIds(OperationContext* txn,
+ virtual void getCursorIds(OperationContext* opCtx,
StringData ns,
std::set<CursorId>* openCursors) = 0;
};
diff --git a/src/mongo/db/range_deleter_db_env.cpp b/src/mongo/db/range_deleter_db_env.cpp
index 2f692111c0a..08419a39a11 100644
--- a/src/mongo/db/range_deleter_db_env.cpp
+++ b/src/mongo/db/range_deleter_db_env.cpp
@@ -58,7 +58,7 @@ using std::string;
* 5. Delete range.
* 6. Wait until the majority of the secondaries catch up.
*/
-bool RangeDeleterDBEnv::deleteRange(OperationContext* txn,
+bool RangeDeleterDBEnv::deleteRange(OperationContext* opCtx,
const RangeDeleteEntry& taskDetails,
long long int* deletedDocs,
std::string* errMsg) {
@@ -73,7 +73,7 @@ bool RangeDeleterDBEnv::deleteRange(OperationContext* txn,
Client::initThreadIfNotAlready("RangeDeleter");
*deletedDocs = 0;
- OperationShardingState::IgnoreVersioningBlock forceVersion(txn, NamespaceString(ns));
+ OperationShardingState::IgnoreVersioningBlock forceVersion(opCtx, NamespaceString(ns));
Helpers::RemoveSaver removeSaver("moveChunk", ns, taskDetails.options.removeSaverReason);
Helpers::RemoveSaver* removeSaverPtr = NULL;
@@ -82,13 +82,13 @@ bool RangeDeleterDBEnv::deleteRange(OperationContext* txn,
}
// log the opId so the user can use it to cancel the delete using killOp.
- unsigned int opId = txn->getOpID();
+ unsigned int opId = opCtx->getOpID();
log() << "Deleter starting delete for: " << ns << " from " << redact(inclusiveLower) << " -> "
<< redact(exclusiveUpper) << ", with opId: " << opId;
try {
*deletedDocs =
- Helpers::removeRange(txn,
+ Helpers::removeRange(opCtx,
KeyRange(ns, inclusiveLower, exclusiveUpper, keyPattern),
BoundInclusion::kIncludeStartKeyOnly,
writeConcern,
@@ -116,10 +116,10 @@ bool RangeDeleterDBEnv::deleteRange(OperationContext* txn,
return true;
}
-void RangeDeleterDBEnv::getCursorIds(OperationContext* txn,
+void RangeDeleterDBEnv::getCursorIds(OperationContext* opCtx,
StringData ns,
std::set<CursorId>* openCursors) {
- AutoGetCollection autoColl(txn, NamespaceString(ns), MODE_IS);
+ AutoGetCollection autoColl(opCtx, NamespaceString(ns), MODE_IS);
if (!autoColl.getCollection())
return;
diff --git a/src/mongo/db/range_deleter_db_env.h b/src/mongo/db/range_deleter_db_env.h
index 0bca8c6618c..a246573eda6 100644
--- a/src/mongo/db/range_deleter_db_env.h
+++ b/src/mongo/db/range_deleter_db_env.h
@@ -50,7 +50,7 @@ struct RangeDeleterDBEnv : public RangeDeleterEnv {
*
* Does not throw Exceptions.
*/
- virtual bool deleteRange(OperationContext* txn,
+ virtual bool deleteRange(OperationContext* opCtx,
const RangeDeleteEntry& taskDetails,
long long int* deletedDocs,
std::string* errMsg);
@@ -58,7 +58,7 @@ struct RangeDeleterDBEnv : public RangeDeleterEnv {
/**
* Gets the list of open cursors on a given namespace.
*/
- virtual void getCursorIds(OperationContext* txn,
+ virtual void getCursorIds(OperationContext* opCtx,
StringData ns,
std::set<CursorId>* openCursors);
};
diff --git a/src/mongo/db/range_deleter_mock_env.cpp b/src/mongo/db/range_deleter_mock_env.cpp
index f8419105199..9ac49945346 100644
--- a/src/mongo/db/range_deleter_mock_env.cpp
+++ b/src/mongo/db/range_deleter_mock_env.cpp
@@ -95,7 +95,7 @@ DeletedRange RangeDeleterMockEnv::getLastDelete() const {
return _deleteList.back();
}
-bool RangeDeleterMockEnv::deleteRange(OperationContext* txn,
+bool RangeDeleterMockEnv::deleteRange(OperationContext* opCtx,
const RangeDeleteEntry& taskDetails,
long long int* deletedDocs,
string* errMsg) {
@@ -130,7 +130,7 @@ bool RangeDeleterMockEnv::deleteRange(OperationContext* txn,
return true;
}
-void RangeDeleterMockEnv::getCursorIds(OperationContext* txn, StringData ns, set<CursorId>* in) {
+void RangeDeleterMockEnv::getCursorIds(OperationContext* opCtx, StringData ns, set<CursorId>* in) {
{
stdx::lock_guard<stdx::mutex> sl(_cursorMapMutex);
const set<CursorId>& _cursors = _cursorMap[ns.toString()];
diff --git a/src/mongo/db/range_deleter_mock_env.h b/src/mongo/db/range_deleter_mock_env.h
index 307dbe2bd53..f57ef8b1d5e 100644
--- a/src/mongo/db/range_deleter_mock_env.h
+++ b/src/mongo/db/range_deleter_mock_env.h
@@ -124,7 +124,7 @@ public:
* but simply keeps a record of it. Can also be paused by pauseDeletes and
* resumed with resumeDeletes.
*/
- bool deleteRange(OperationContext* txn,
+ bool deleteRange(OperationContext* opCtx,
const RangeDeleteEntry& taskDetails,
long long int* deletedDocs,
std::string* errMsg);
@@ -134,7 +134,7 @@ public:
* RangeDeleterEnv::getCursorIds. The cursors returned can be modified with
* the setCursorId and clearCursorMap methods.
*/
- void getCursorIds(OperationContext* txn, StringData ns, std::set<CursorId>* in);
+ void getCursorIds(OperationContext* opCtx, StringData ns, std::set<CursorId>* in);
private:
// mutex acquisition ordering:
diff --git a/src/mongo/db/range_deleter_test.cpp b/src/mongo/db/range_deleter_test.cpp
index 501994e3495..633743c330a 100644
--- a/src/mongo/db/range_deleter_test.cpp
+++ b/src/mongo/db/range_deleter_test.cpp
@@ -65,7 +65,7 @@ public:
return getGlobalServiceContext();
}
OperationContext* getOpCtx() {
- return _txn.get();
+ return _opCtx.get();
}
protected:
@@ -82,12 +82,12 @@ private:
stdx::make_unique<mongo::repl::ReplicationCoordinatorMock>(getServiceContext(),
replSettings));
_client = getServiceContext()->makeClient("RangeDeleterTest");
- _txn = _client->makeOperationContext();
+ _opCtx = _client->makeOperationContext();
deleter.startWorkers();
}
ServiceContext::UniqueClient _client;
- ServiceContext::UniqueOperationContext _txn;
+ ServiceContext::UniqueOperationContext _opCtx;
};
using ImmediateDelete = RangeDeleterTest;
diff --git a/src/mongo/db/read_concern.cpp b/src/mongo/db/read_concern.cpp
index f88d4ff2339..396575db138 100644
--- a/src/mongo/db/read_concern.cpp
+++ b/src/mongo/db/read_concern.cpp
@@ -59,7 +59,7 @@ ExportedServerParameter<bool, ServerParameterType::kStartupOnly> TestingSnapshot
} // namespace
-StatusWith<repl::ReadConcernArgs> extractReadConcern(OperationContext* txn,
+StatusWith<repl::ReadConcernArgs> extractReadConcern(OperationContext* opCtx,
const BSONObj& cmdObj,
bool supportsReadConcern) {
repl::ReadConcernArgs readConcernArgs;
@@ -77,8 +77,8 @@ StatusWith<repl::ReadConcernArgs> extractReadConcern(OperationContext* txn,
return readConcernArgs;
}
-Status waitForReadConcern(OperationContext* txn, const repl::ReadConcernArgs& readConcernArgs) {
- repl::ReplicationCoordinator* const replCoord = repl::ReplicationCoordinator::get(txn);
+Status waitForReadConcern(OperationContext* opCtx, const repl::ReadConcernArgs& readConcernArgs) {
+ repl::ReplicationCoordinator* const replCoord = repl::ReplicationCoordinator::get(opCtx);
if (readConcernArgs.getLevel() == repl::ReadConcernLevel::kLinearizableReadConcern) {
if (replCoord->getReplicationMode() != repl::ReplicationCoordinator::modeReplSet) {
@@ -108,7 +108,7 @@ Status waitForReadConcern(OperationContext* txn, const repl::ReadConcernArgs& re
// Skip waiting for the OpTime when testing snapshot behavior
if (!testingSnapshotBehaviorInIsolation && !readConcernArgs.isEmpty()) {
- Status status = replCoord->waitUntilOpTimeForRead(txn, readConcernArgs);
+ Status status = replCoord->waitUntilOpTimeForRead(opCtx, readConcernArgs);
if (!status.isOK()) {
return status;
}
@@ -129,57 +129,57 @@ Status waitForReadConcern(OperationContext* txn, const repl::ReadConcernArgs& re
LOG(debugLevel) << "Waiting for 'committed' snapshot to be available for reading: "
<< readConcernArgs;
- Status status = txn->recoveryUnit()->setReadFromMajorityCommittedSnapshot();
+ Status status = opCtx->recoveryUnit()->setReadFromMajorityCommittedSnapshot();
// Wait until a snapshot is available.
while (status == ErrorCodes::ReadConcernMajorityNotAvailableYet) {
LOG(debugLevel) << "Snapshot not available yet.";
- replCoord->waitUntilSnapshotCommitted(txn, SnapshotName::min());
- status = txn->recoveryUnit()->setReadFromMajorityCommittedSnapshot();
+ replCoord->waitUntilSnapshotCommitted(opCtx, SnapshotName::min());
+ status = opCtx->recoveryUnit()->setReadFromMajorityCommittedSnapshot();
}
if (!status.isOK()) {
return status;
}
- LOG(debugLevel) << "Using 'committed' snapshot: " << CurOp::get(txn)->query();
+ LOG(debugLevel) << "Using 'committed' snapshot: " << CurOp::get(opCtx)->query();
}
return Status::OK();
}
-Status waitForLinearizableReadConcern(OperationContext* txn) {
+Status waitForLinearizableReadConcern(OperationContext* opCtx) {
repl::ReplicationCoordinator* replCoord =
- repl::ReplicationCoordinator::get(txn->getClient()->getServiceContext());
+ repl::ReplicationCoordinator::get(opCtx->getClient()->getServiceContext());
{
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock lk(txn->lockState(), "local", MODE_IX);
- Lock::CollectionLock lock(txn->lockState(), "local.oplog.rs", MODE_IX);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock lk(opCtx->lockState(), "local", MODE_IX);
+ Lock::CollectionLock lock(opCtx->lockState(), "local.oplog.rs", MODE_IX);
- if (!replCoord->canAcceptWritesForDatabase(txn, "admin")) {
+ if (!replCoord->canAcceptWritesForDatabase(opCtx, "admin")) {
return {ErrorCodes::NotMaster,
"No longer primary when waiting for linearizable read concern"};
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork uow(txn);
- txn->getClient()->getServiceContext()->getOpObserver()->onOpMessage(
- txn,
+ WriteUnitOfWork uow(opCtx);
+ opCtx->getClient()->getServiceContext()->getOpObserver()->onOpMessage(
+ opCtx,
BSON("msg"
<< "linearizable read"));
uow.commit();
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
- txn, "waitForLinearizableReadConcern", "local.rs.oplog");
+ opCtx, "waitForLinearizableReadConcern", "local.rs.oplog");
}
WriteConcernOptions wc = WriteConcernOptions(
WriteConcernOptions::kMajority, WriteConcernOptions::SyncMode::UNSET, 0);
- repl::OpTime lastOpApplied = repl::ReplClientInfo::forClient(txn->getClient()).getLastOp();
- auto awaitReplResult = replCoord->awaitReplication(txn, lastOpApplied, wc);
+ repl::OpTime lastOpApplied = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
+ auto awaitReplResult = replCoord->awaitReplication(opCtx, lastOpApplied, wc);
if (awaitReplResult.status == ErrorCodes::WriteConcernFailed) {
return Status(ErrorCodes::LinearizableReadConcernError,
"Failed to confirm that read was linearizable.");
diff --git a/src/mongo/db/read_concern.h b/src/mongo/db/read_concern.h
index 695be4d1b8c..158596520ea 100644
--- a/src/mongo/db/read_concern.h
+++ b/src/mongo/db/read_concern.h
@@ -44,7 +44,7 @@ class ReadConcernArgs;
* Given the specified command and whether it supports read concern, returns an effective read
* concern which should be used.
*/
-StatusWith<repl::ReadConcernArgs> extractReadConcern(OperationContext* txn,
+StatusWith<repl::ReadConcernArgs> extractReadConcern(OperationContext* opCtx,
const BSONObj& cmdObj,
bool supportsReadConcern);
@@ -53,12 +53,12 @@ StatusWith<repl::ReadConcernArgs> extractReadConcern(OperationContext* txn,
* satisfied given the current state of the server and if so calls into the replication subsystem to
* perform the wait.
*/
-Status waitForReadConcern(OperationContext* txn, const repl::ReadConcernArgs& readConcernArgs);
+Status waitForReadConcern(OperationContext* opCtx, const repl::ReadConcernArgs& readConcernArgs);
/*
* Given a linearizable read command, confirm that
* current primary is still the true primary of the replica set.
*/
-Status waitForLinearizableReadConcern(OperationContext* txn);
+Status waitForLinearizableReadConcern(OperationContext* opCtx);
} // namespace mongo
diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp
index d688e7d5f44..5755d9a69f2 100644
--- a/src/mongo/db/repair_database.cpp
+++ b/src/mongo/db/repair_database.cpp
@@ -59,7 +59,7 @@ using std::string;
using IndexVersion = IndexDescriptor::IndexVersion;
namespace {
-Status rebuildIndexesOnCollection(OperationContext* txn,
+Status rebuildIndexesOnCollection(OperationContext* opCtx,
DatabaseCatalogEntry* dbce,
const std::string& collectionName) {
CollectionCatalogEntry* cce = dbce->getCollectionCatalogEntry(collectionName);
@@ -68,12 +68,12 @@ Status rebuildIndexesOnCollection(OperationContext* txn,
std::vector<BSONObj> indexSpecs;
{
// Fetch all indexes
- cce->getAllIndexes(txn, &indexNames);
+ cce->getAllIndexes(opCtx, &indexNames);
indexSpecs.reserve(indexNames.size());
for (size_t i = 0; i < indexNames.size(); i++) {
const string& name = indexNames[i];
- BSONObj spec = cce->getIndexSpec(txn, name);
+ BSONObj spec = cce->getIndexSpec(opCtx, name);
IndexVersion newIndexVersion = IndexVersion::kV0;
{
@@ -129,11 +129,11 @@ Status rebuildIndexesOnCollection(OperationContext* txn,
// 2) Open the Collection
// 3) Start the index build process.
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
{ // 1
for (size_t i = 0; i < indexNames.size(); i++) {
- Status s = cce->removeIndex(txn, indexNames[i]);
+ Status s = cce->removeIndex(opCtx, indexNames[i]);
if (!s.isOK())
return s;
}
@@ -143,9 +143,9 @@ Status rebuildIndexesOnCollection(OperationContext* txn,
// open a bad index and fail.
// TODO see if MultiIndexBlock can be made to work without a Collection.
const StringData ns = cce->ns().ns();
- collection.reset(new Collection(txn, ns, cce, dbce->getRecordStore(ns), dbce));
+ collection.reset(new Collection(opCtx, ns, cce, dbce->getRecordStore(ns), dbce));
- indexer.reset(new MultiIndexBlock(txn, collection.get()));
+ indexer.reset(new MultiIndexBlock(opCtx, collection.get()));
Status status = indexer->init(indexSpecs).getStatus();
if (!status.isOK()) {
// The WUOW will handle cleanup, so the indexer shouldn't do its own.
@@ -163,7 +163,7 @@ Status rebuildIndexesOnCollection(OperationContext* txn,
long long dataSize = 0;
RecordStore* rs = collection->getRecordStore();
- auto cursor = rs->getCursor(txn);
+ auto cursor = rs->getCursor(opCtx);
while (auto record = cursor->next()) {
RecordId id = record->id;
RecordData& data = record->data;
@@ -175,8 +175,8 @@ Status rebuildIndexesOnCollection(OperationContext* txn,
log() << "Invalid BSON detected at " << id << ": " << redact(status) << ". Deleting.";
cursor->save(); // 'data' is no longer valid.
{
- WriteUnitOfWork wunit(txn);
- rs->deleteRecord(txn, id);
+ WriteUnitOfWork wunit(opCtx);
+ rs->deleteRecord(opCtx, id);
wunit.commit();
}
cursor->restore();
@@ -188,7 +188,7 @@ Status rebuildIndexesOnCollection(OperationContext* txn,
// Now index the record.
// TODO SERVER-14812 add a mode that drops duplicates rather than failing
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
status = indexer->insert(data.releaseToBson(), id);
if (!status.isOK())
return status;
@@ -200,9 +200,9 @@ Status rebuildIndexesOnCollection(OperationContext* txn,
return status;
{
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
indexer->commit();
- rs->updateStatsAfterRepair(txn, numRecords, dataSize);
+ rs->updateStatsAfterRepair(opCtx, numRecords, dataSize);
wunit.commit();
}
@@ -210,27 +210,27 @@ Status rebuildIndexesOnCollection(OperationContext* txn,
}
} // namespace
-Status repairDatabase(OperationContext* txn,
+Status repairDatabase(OperationContext* opCtx,
StorageEngine* engine,
const std::string& dbName,
bool preserveClonedFilesOnFailure,
bool backupOriginalFiles) {
- DisableDocumentValidation validationDisabler(txn);
+ DisableDocumentValidation validationDisabler(opCtx);
// We must hold some form of lock here
- invariant(txn->lockState()->isLocked());
+ invariant(opCtx->lockState()->isLocked());
invariant(dbName.find('.') == string::npos);
log() << "repairDatabase " << dbName << endl;
BackgroundOperation::assertNoBgOpInProgForDb(dbName);
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
if (engine->isMmapV1()) {
// MMAPv1 is a layering violation so it implements its own repairDatabase.
return static_cast<MMAPV1Engine*>(engine)->repairDatabase(
- txn, dbName, preserveClonedFilesOnFailure, backupOriginalFiles);
+ opCtx, dbName, preserveClonedFilesOnFailure, backupOriginalFiles);
}
// These are MMAPv1 specific
@@ -242,17 +242,17 @@ Status repairDatabase(OperationContext* txn,
}
// Close the db to invalidate all current users and caches.
- dbHolder().close(txn, dbName);
- ON_BLOCK_EXIT([&dbName, &txn] {
+ dbHolder().close(opCtx, dbName);
+ ON_BLOCK_EXIT([&dbName, &opCtx] {
try {
// Open the db after everything finishes.
- auto db = dbHolder().openDb(txn, dbName);
+ auto db = dbHolder().openDb(opCtx, dbName);
// Set the minimum snapshot for all Collections in this db. This ensures that readers
// using majority readConcern level can only use the collections after their repaired
// versions are in the committed view.
- auto replCoord = repl::ReplicationCoordinator::get(txn);
- auto snapshotName = replCoord->reserveSnapshotName(txn);
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
+ auto snapshotName = replCoord->reserveSnapshotName(opCtx);
replCoord->forceSnapshotCreation(); // Ensure a newer snapshot is created even if idle.
for (auto&& collection : *db) {
@@ -264,7 +264,7 @@ Status repairDatabase(OperationContext* txn,
}
});
- DatabaseCatalogEntry* dbce = engine->getDatabaseCatalogEntry(txn, dbName);
+ DatabaseCatalogEntry* dbce = engine->getDatabaseCatalogEntry(opCtx, dbName);
std::list<std::string> colls;
dbce->getCollectionNamespaces(&colls);
@@ -272,15 +272,15 @@ Status repairDatabase(OperationContext* txn,
for (std::list<std::string>::const_iterator it = colls.begin(); it != colls.end(); ++it) {
// Don't check for interrupt after starting to repair a collection otherwise we can
// leave data in an inconsistent state. Interrupting between collections is ok, however.
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
log() << "Repairing collection " << *it;
- Status status = engine->repairRecordStore(txn, *it);
+ Status status = engine->repairRecordStore(opCtx, *it);
if (!status.isOK())
return status;
- status = rebuildIndexesOnCollection(txn, dbce, *it);
+ status = rebuildIndexesOnCollection(opCtx, dbce, *it);
if (!status.isOK())
return status;
diff --git a/src/mongo/db/repair_database.h b/src/mongo/db/repair_database.h
index c0945c0ed43..b945b54c19f 100644
--- a/src/mongo/db/repair_database.h
+++ b/src/mongo/db/repair_database.h
@@ -41,7 +41,7 @@ class StringData;
* Some data may be lost or modified in the process but the output will
* be structurally valid on successful return.
*/
-Status repairDatabase(OperationContext* txn,
+Status repairDatabase(OperationContext* opCtx,
StorageEngine* engine,
const std::string& dbName,
bool preserveClonedFilesOnFailure = false,
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index a0f646f6078..186903d0924 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -140,20 +140,20 @@ BackgroundSync::BackgroundSync(
bufferMaxSizeGauge.increment(_oplogBuffer->getMaxSize() - bufferMaxSizeGauge.get());
}
-void BackgroundSync::startup(OperationContext* txn) {
- _oplogBuffer->startup(txn);
+void BackgroundSync::startup(OperationContext* opCtx) {
+ _oplogBuffer->startup(opCtx);
invariant(!_producerThread);
_producerThread.reset(new stdx::thread(stdx::bind(&BackgroundSync::_run, this)));
}
-void BackgroundSync::shutdown(OperationContext* txn) {
+void BackgroundSync::shutdown(OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> lock(_mutex);
// Clear the buffer. This unblocks the OplogFetcher if it is blocked with a full queue, but
// ensures that it won't add anything. It will also unblock the OpApplier pipeline if it is
// waiting for an operation to be past the slaveDelay point.
- clearBuffer(txn);
+ clearBuffer(opCtx);
_state = ProducerState::Stopped;
if (_syncSourceResolver) {
@@ -167,9 +167,9 @@ void BackgroundSync::shutdown(OperationContext* txn) {
_inShutdown = true;
}
-void BackgroundSync::join(OperationContext* txn) {
+void BackgroundSync::join(OperationContext* opCtx) {
_producerThread->join();
- _oplogBuffer->shutdown(txn);
+ _oplogBuffer->shutdown(opCtx);
}
bool BackgroundSync::inShutdown() const {
@@ -225,15 +225,15 @@ void BackgroundSync::_runProducer() {
}
// we want to start when we're no longer primary
// start() also loads _lastOpTimeFetched, which we know is set from the "if"
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
if (getState() == ProducerState::Starting) {
- start(txn.get());
+ start(opCtx.get());
}
- _produce(txn.get());
+ _produce(opCtx.get());
}
-void BackgroundSync::_produce(OperationContext* txn) {
+void BackgroundSync::_produce(OperationContext* opCtx) {
if (MONGO_FAIL_POINT(stopReplProducer)) {
// This log output is used in js tests so please leave it.
log() << "bgsync - stopReplProducer fail point "
@@ -271,7 +271,7 @@ void BackgroundSync::_produce(OperationContext* txn) {
HostAndPort source;
SyncSourceResolverResponse syncSourceResp;
{
- const OpTime minValidSaved = StorageInterface::get(txn)->getMinValid(txn);
+ const OpTime minValidSaved = StorageInterface::get(opCtx)->getMinValid(opCtx);
stdx::lock_guard<stdx::mutex> lock(_mutex);
const auto requiredOpTime = (minValidSaved > _lastOpTimeFetched) ? minValidSaved : OpTime();
@@ -358,8 +358,9 @@ void BackgroundSync::_produce(OperationContext* txn) {
// Set the applied point if unset. This is most likely the first time we've established a sync
// source since stepping down or otherwise clearing the applied point. We need to set this here,
// before the OplogWriter gets a chance to append to the oplog.
- if (StorageInterface::get(txn)->getAppliedThrough(txn).isNull()) {
- StorageInterface::get(txn)->setAppliedThrough(txn, _replCoord->getMyLastAppliedOpTime());
+ if (StorageInterface::get(opCtx)->getAppliedThrough(opCtx).isNull()) {
+ StorageInterface::get(opCtx)->setAppliedThrough(opCtx,
+ _replCoord->getMyLastAppliedOpTime());
}
// "lastFetched" not used. Already set in _enqueueDocuments.
@@ -472,7 +473,7 @@ void BackgroundSync::_produce(OperationContext* txn) {
}
}
- _rollback(txn, source, syncSourceResp.rbid, getConnection);
+ _rollback(opCtx, source, syncSourceResp.rbid, getConnection);
// Reset the producer to clear the sync source and the last optime fetched.
stop(true);
startProducerIfStopped();
@@ -540,10 +541,10 @@ Status BackgroundSync::_enqueueDocuments(Fetcher::Documents::const_iterator begi
return Status::OK(); // Nothing to do.
}
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
// Wait for enough space.
- _oplogBuffer->waitForSpace(txn.get(), info.toApplyDocumentBytes);
+ _oplogBuffer->waitForSpace(opCtx.get(), info.toApplyDocumentBytes);
{
// Don't add more to the buffer if we are in shutdown. Continue holding the lock until we
@@ -560,7 +561,7 @@ Status BackgroundSync::_enqueueDocuments(Fetcher::Documents::const_iterator begi
}
// Buffer docs for later application.
- _oplogBuffer->pushAllNonBlocking(txn.get(), begin, end);
+ _oplogBuffer->pushAllNonBlocking(opCtx.get(), begin, end);
// Update last fetched info.
_lastFetchedHash = info.lastDocument.value;
@@ -585,8 +586,8 @@ Status BackgroundSync::_enqueueDocuments(Fetcher::Documents::const_iterator begi
return Status::OK();
}
-bool BackgroundSync::peek(OperationContext* txn, BSONObj* op) {
- return _oplogBuffer->peek(txn, op);
+bool BackgroundSync::peek(OperationContext* opCtx, BSONObj* op) {
+ return _oplogBuffer->peek(opCtx, op);
}
void BackgroundSync::waitForMore() {
@@ -594,11 +595,11 @@ void BackgroundSync::waitForMore() {
_oplogBuffer->waitForData(Seconds(1));
}
-void BackgroundSync::consume(OperationContext* txn) {
+void BackgroundSync::consume(OperationContext* opCtx) {
// this is just to get the op off the queue, it's been peeked at
// and queued for application already
BSONObj op;
- if (_oplogBuffer->tryPop(txn, &op)) {
+ if (_oplogBuffer->tryPop(opCtx, &op)) {
bufferCountGauge.decrement(1);
bufferSizeGauge.decrement(getSize(op));
} else {
@@ -609,7 +610,7 @@ void BackgroundSync::consume(OperationContext* txn) {
}
}
-void BackgroundSync::_rollback(OperationContext* txn,
+void BackgroundSync::_rollback(OperationContext* opCtx,
const HostAndPort& source,
boost::optional<int> requiredRBID,
stdx::function<DBClientBase*()> getConnection) {
@@ -635,7 +636,7 @@ void BackgroundSync::_rollback(OperationContext* txn,
// then.
{
log() << "rollback 0";
- Lock::GlobalWrite globalWrite(txn->lockState());
+ Lock::GlobalWrite globalWrite(opCtx->lockState());
if (!_replCoord->setFollowerMode(MemberState::RS_ROLLBACK)) {
log() << "Cannot transition from " << _replCoord->getMemberState().toString() << " to "
<< MemberState(MemberState::RS_ROLLBACK).toString();
@@ -644,8 +645,8 @@ void BackgroundSync::_rollback(OperationContext* txn,
}
try {
- auto status = syncRollback(txn,
- OplogInterfaceLocal(txn, rsOplogName),
+ auto status = syncRollback(opCtx,
+ OplogInterfaceLocal(opCtx, rsOplogName),
RollbackSourceImpl(getConnection, source, rsOplogName),
requiredRBID,
_replCoord);
@@ -668,7 +669,7 @@ void BackgroundSync::_rollback(OperationContext* txn,
warning() << "rollback cannot complete at this time (retrying later): " << redact(ex)
<< " appliedThrough=" << _replCoord->getMyLastAppliedOpTime()
- << " minvalid=" << StorageInterface::get(txn)->getMinValid(txn);
+ << " minvalid=" << StorageInterface::get(opCtx)->getMinValid(opCtx);
// Sleep a bit to allow upstream node to coalesce, if that was the cause of the failure. If
// we failed in a way that will keep failing, but wasn't flagged as a fatal failure, this
@@ -684,12 +685,12 @@ void BackgroundSync::_rollback(OperationContext* txn,
// so that if we wind up shutting down uncleanly in response to something we rolled back
// we know that we won't wind up right back in the same situation when we start back up
// because the rollback wasn't durable.
- txn->recoveryUnit()->waitUntilDurable();
+ opCtx->recoveryUnit()->waitUntilDurable();
// If we detected that we rolled back the shardIdentity document as part of this rollback
// then we must shut down to clear the in-memory ShardingState associated with the
// shardIdentity document.
- if (ShardIdentityRollbackNotifier::get(txn)->didRollbackHappen()) {
+ if (ShardIdentityRollbackNotifier::get(opCtx)->didRollbackHappen()) {
severe() << "shardIdentity document rollback detected. Shutting down to clear "
"in-memory sharding state. Restarting this process should safely return it "
"to a healthy state";
@@ -734,10 +735,10 @@ void BackgroundSync::stop(bool resetLastFetchedOptime) {
}
}
-void BackgroundSync::start(OperationContext* txn) {
+void BackgroundSync::start(OperationContext* opCtx) {
OpTimeWithHash lastAppliedOpTimeWithHash;
do {
- lastAppliedOpTimeWithHash = _readLastAppliedOpTimeWithHash(txn);
+ lastAppliedOpTimeWithHash = _readLastAppliedOpTimeWithHash(opCtx);
stdx::lock_guard<stdx::mutex> lk(_mutex);
// Double check the state after acquiring the mutex.
if (_state != ProducerState::Starting) {
@@ -762,28 +763,28 @@ void BackgroundSync::start(OperationContext* txn) {
LOG(1) << "bgsync fetch queue set to: " << _lastOpTimeFetched << " " << _lastFetchedHash;
}
-void BackgroundSync::clearBuffer(OperationContext* txn) {
- _oplogBuffer->clear(txn);
+void BackgroundSync::clearBuffer(OperationContext* opCtx) {
+ _oplogBuffer->clear(opCtx);
const auto count = bufferCountGauge.get();
bufferCountGauge.decrement(count);
const auto size = bufferSizeGauge.get();
bufferSizeGauge.decrement(size);
}
-OpTimeWithHash BackgroundSync::_readLastAppliedOpTimeWithHash(OperationContext* txn) {
+OpTimeWithHash BackgroundSync::_readLastAppliedOpTimeWithHash(OperationContext* opCtx) {
BSONObj oplogEntry;
try {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock lk(txn->lockState(), "local", MODE_X);
- bool success = Helpers::getLast(txn, rsOplogName.c_str(), oplogEntry);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock lk(opCtx->lockState(), "local", MODE_X);
+ bool success = Helpers::getLast(opCtx, rsOplogName.c_str(), oplogEntry);
if (!success) {
// This can happen when we are to do an initial sync. lastHash will be set
// after the initial sync is complete.
return OpTimeWithHash(0);
}
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "readLastAppliedHash", rsOplogName);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "readLastAppliedHash", rsOplogName);
} catch (const DBException& ex) {
severe() << "Problem reading " << rsOplogName << ": " << redact(ex);
fassertFailed(18904);
@@ -817,8 +818,8 @@ bool BackgroundSync::shouldStopFetching() const {
return false;
}
-void BackgroundSync::pushTestOpToBuffer(OperationContext* txn, const BSONObj& op) {
- _oplogBuffer->push(txn, op);
+void BackgroundSync::pushTestOpToBuffer(OperationContext* opCtx, const BSONObj& op) {
+ _oplogBuffer->push(opCtx, op);
bufferCountGauge.increment();
bufferSizeGauge.increment(op.objsize());
}
diff --git a/src/mongo/db/repl/bgsync.h b/src/mongo/db/repl/bgsync.h
index 479caabaee8..85069066cdd 100644
--- a/src/mongo/db/repl/bgsync.h
+++ b/src/mongo/db/repl/bgsync.h
@@ -84,17 +84,17 @@ public:
/**
* Starts oplog buffer, task executor and producer thread, in that order.
*/
- void startup(OperationContext* txn);
+ void startup(OperationContext* opCtx);
/**
* Signals producer thread to stop.
*/
- void shutdown(OperationContext* txn);
+ void shutdown(OperationContext* opCtx);
/**
* Waits for producer thread to stop before shutting down the task executor and oplog buffer.
*/
- void join(OperationContext* txn);
+ void join(OperationContext* opCtx);
/**
* Returns true if shutdown() has been called.
@@ -109,8 +109,8 @@ public:
// Interface implementation
- bool peek(OperationContext* txn, BSONObj* op);
- void consume(OperationContext* txn);
+ bool peek(OperationContext* opCtx, BSONObj* op);
+ void consume(OperationContext* opCtx);
void clearSyncTarget();
void waitForMore();
@@ -118,7 +118,7 @@ public:
BSONObj getCounters();
// Clears any fetched and buffered oplog entries.
- void clearBuffer(OperationContext* txn);
+ void clearBuffer(OperationContext* opCtx);
/**
* Returns true if any of the following is true:
@@ -134,7 +134,7 @@ public:
void startProducerIfStopped();
// Adds a fake oplog entry to buffer. Used for testing only.
- void pushTestOpToBuffer(OperationContext* txn, const BSONObj& op);
+ void pushTestOpToBuffer(OperationContext* opCtx, const BSONObj& op);
private:
bool _inShutdown_inlock() const;
@@ -148,7 +148,7 @@ private:
void _run();
// Production thread inner loop.
void _runProducer();
- void _produce(OperationContext* txn);
+ void _produce(OperationContext* opCtx);
/**
* Checks current background sync state before pushing operations into blocking queue and
@@ -165,15 +165,15 @@ private:
* Executes a rollback.
* 'getConnection' returns a connection to the sync source.
*/
- void _rollback(OperationContext* txn,
+ void _rollback(OperationContext* opCtx,
const HostAndPort& source,
boost::optional<int> requiredRBID,
stdx::function<DBClientBase*()> getConnection);
// restart syncing
- void start(OperationContext* txn);
+ void start(OperationContext* opCtx);
- OpTimeWithHash _readLastAppliedOpTimeWithHash(OperationContext* txn);
+ OpTimeWithHash _readLastAppliedOpTimeWithHash(OperationContext* opCtx);
// Production thread
std::unique_ptr<OplogBuffer> _oplogBuffer;
diff --git a/src/mongo/db/repl/collection_bulk_loader_impl.cpp b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
index ffc6b176032..9b1c96ec95b 100644
--- a/src/mongo/db/repl/collection_bulk_loader_impl.cpp
+++ b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
@@ -51,7 +51,7 @@
namespace mongo {
namespace repl {
-CollectionBulkLoaderImpl::CollectionBulkLoaderImpl(OperationContext* txn,
+CollectionBulkLoaderImpl::CollectionBulkLoaderImpl(OperationContext* opCtx,
Collection* coll,
const BSONObj idIndexSpec,
std::unique_ptr<OldThreadPool> threadPool,
@@ -62,13 +62,13 @@ CollectionBulkLoaderImpl::CollectionBulkLoaderImpl(OperationContext* txn,
_runner(std::move(runner)),
_autoColl(std::move(autoColl)),
_autoDB(std::move(autoDb)),
- _txn(txn),
+ _opCtx(opCtx),
_coll(coll),
_nss{coll->ns()},
- _idIndexBlock(stdx::make_unique<MultiIndexBlock>(txn, coll)),
- _secondaryIndexesBlock(stdx::make_unique<MultiIndexBlock>(txn, coll)),
+ _idIndexBlock(stdx::make_unique<MultiIndexBlock>(opCtx, coll)),
+ _secondaryIndexesBlock(stdx::make_unique<MultiIndexBlock>(opCtx, coll)),
_idIndexSpec(idIndexSpec) {
- invariant(txn);
+ invariant(opCtx);
invariant(coll);
invariant(_runner);
invariant(_autoDB);
@@ -89,10 +89,10 @@ CollectionBulkLoaderImpl::~CollectionBulkLoaderImpl() {
Status CollectionBulkLoaderImpl::init(Collection* coll,
const std::vector<BSONObj>& secondaryIndexSpecs) {
return _runTaskReleaseResourcesOnFailure(
- [coll, &secondaryIndexSpecs, this](OperationContext* txn) -> Status {
- invariant(txn);
+ [coll, &secondaryIndexSpecs, this](OperationContext* opCtx) -> Status {
+ invariant(opCtx);
invariant(coll);
- invariant(txn->getClient() == &cc());
+ invariant(opCtx->getClient() == &cc());
std::vector<BSONObj> specs(secondaryIndexSpecs);
// This enforces the buildIndexes setting in the replica set configuration.
_secondaryIndexesBlock->removeExistingIndexes(&specs);
@@ -122,8 +122,8 @@ Status CollectionBulkLoaderImpl::insertDocuments(const std::vector<BSONObj>::con
const std::vector<BSONObj>::const_iterator end) {
int count = 0;
return _runTaskReleaseResourcesOnFailure(
- [begin, end, &count, this](OperationContext* txn) -> Status {
- invariant(txn);
+ [begin, end, &count, this](OperationContext* opCtx) -> Status {
+ invariant(opCtx);
for (auto iter = begin; iter != end; ++iter) {
std::vector<MultiIndexBlock*> indexers;
@@ -134,15 +134,15 @@ Status CollectionBulkLoaderImpl::insertDocuments(const std::vector<BSONObj>::con
indexers.push_back(_secondaryIndexesBlock.get());
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
- const auto status = _coll->insertDocument(txn, *iter, indexers, false);
+ WriteUnitOfWork wunit(opCtx);
+ const auto status = _coll->insertDocument(opCtx, *iter, indexers, false);
if (!status.isOK()) {
return status;
}
wunit.commit();
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
- _txn, "CollectionBulkLoaderImpl::insertDocuments", _nss.ns());
+ _opCtx, "CollectionBulkLoaderImpl::insertDocuments", _nss.ns());
++count;
}
@@ -152,11 +152,11 @@ Status CollectionBulkLoaderImpl::insertDocuments(const std::vector<BSONObj>::con
Status CollectionBulkLoaderImpl::commit() {
return _runTaskReleaseResourcesOnFailure(
- [this](OperationContext* txn) -> Status {
+ [this](OperationContext* opCtx) -> Status {
_stats.startBuildingIndexes = Date_t::now();
LOG(2) << "Creating indexes for ns: " << _nss.ns();
- invariant(txn->getClient() == &cc());
- invariant(txn == _txn);
+ invariant(opCtx->getClient() == &cc());
+ invariant(opCtx == _opCtx);
// Commit before deleting dups, so the dups will be removed from secondary indexes when
// deleted.
@@ -173,12 +173,12 @@ Status CollectionBulkLoaderImpl::commit() {
"MultiIndexBlock::ignoreUniqueConstraint set."};
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
_secondaryIndexesBlock->commit();
wunit.commit();
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
- _txn, "CollectionBulkLoaderImpl::commit", _nss.ns());
+ _opCtx, "CollectionBulkLoaderImpl::commit", _nss.ns());
}
if (_idIndexBlock) {
@@ -192,8 +192,8 @@ Status CollectionBulkLoaderImpl::commit() {
for (auto&& it : dups) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(_txn);
- _coll->deleteDocument(_txn,
+ WriteUnitOfWork wunit(_opCtx);
+ _coll->deleteDocument(_opCtx,
it,
nullptr /** OpDebug **/,
false /* fromMigrate */,
@@ -201,17 +201,17 @@ Status CollectionBulkLoaderImpl::commit() {
wunit.commit();
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
- _txn, "CollectionBulkLoaderImpl::commit", _nss.ns());
+ _opCtx, "CollectionBulkLoaderImpl::commit", _nss.ns());
}
// Commit _id index, without dups.
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
_idIndexBlock->commit();
wunit.commit();
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
- _txn, "CollectionBulkLoaderImpl::commit", _nss.ns());
+ _opCtx, "CollectionBulkLoaderImpl::commit", _nss.ns());
}
_stats.endBuildingIndexes = Date_t::now();
LOG(2) << "Done creating indexes for ns: " << _nss.ns()
@@ -244,9 +244,9 @@ void CollectionBulkLoaderImpl::_releaseResources() {
Status CollectionBulkLoaderImpl::_runTaskReleaseResourcesOnFailure(
TaskRunner::SynchronousTask task, TaskRunner::NextAction nextAction) {
- auto newTask = [this, &task](OperationContext* txn) -> Status {
+ auto newTask = [this, &task](OperationContext* opCtx) -> Status {
ScopeGuard guard = MakeGuard(&CollectionBulkLoaderImpl::_releaseResources, this);
- const auto status = task(txn);
+ const auto status = task(opCtx);
if (status.isOK()) {
guard.Dismiss();
}
diff --git a/src/mongo/db/repl/collection_bulk_loader_impl.h b/src/mongo/db/repl/collection_bulk_loader_impl.h
index 61928e4c385..17b4741ec8f 100644
--- a/src/mongo/db/repl/collection_bulk_loader_impl.h
+++ b/src/mongo/db/repl/collection_bulk_loader_impl.h
@@ -61,7 +61,7 @@ public:
BSONObj toBSON() const;
};
- CollectionBulkLoaderImpl(OperationContext* txn,
+ CollectionBulkLoaderImpl(OperationContext* opCtx,
Collection* coll,
const BSONObj idIndexSpec,
std::unique_ptr<OldThreadPool> threadPool,
@@ -91,7 +91,7 @@ private:
std::unique_ptr<TaskRunner> _runner;
std::unique_ptr<AutoGetCollection> _autoColl;
std::unique_ptr<AutoGetOrCreateDb> _autoDB;
- OperationContext* _txn = nullptr;
+ OperationContext* _opCtx = nullptr;
Collection* _coll = nullptr;
NamespaceString _nss;
std::unique_ptr<MultiIndexBlock> _idIndexBlock;
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index ac1b34c1528..85c9f39169d 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -119,9 +119,9 @@ CollectionCloner::CollectionCloner(executor::TaskExecutor* executor,
_documents(),
_dbWorkTaskRunner(_dbWorkThreadPool),
_scheduleDbWorkFn([this](const executor::TaskExecutor::CallbackFn& work) {
- auto task = [work](OperationContext* txn,
+ auto task = [work](OperationContext* opCtx,
const Status& status) -> TaskRunner::NextAction {
- work(executor::TaskExecutor::CallbackArgs(nullptr, {}, status, txn));
+ work(executor::TaskExecutor::CallbackArgs(nullptr, {}, status, opCtx));
return TaskRunner::NextAction::kDisposeOperationContext;
};
_dbWorkTaskRunner.schedule(task);
@@ -337,9 +337,10 @@ void CollectionCloner::_listIndexesCallback(const Fetcher::QueryResponseStatus&
_finishCallback(cbd.status);
return;
}
- auto txn = cbd.txn;
- txn->setReplicatedWrites(false);
- auto&& createStatus = _storageInterface->createCollection(txn, _destNss, _options);
+ auto opCtx = cbd.opCtx;
+ opCtx->setReplicatedWrites(false);
+ auto&& createStatus =
+ _storageInterface->createCollection(opCtx, _destNss, _options);
_finishCallback(createStatus);
});
if (!scheduleResult.isOK()) {
diff --git a/src/mongo/db/repl/collection_cloner_test.cpp b/src/mongo/db/repl/collection_cloner_test.cpp
index 8c0ac71ba70..440b8dee232 100644
--- a/src/mongo/db/repl/collection_cloner_test.cpp
+++ b/src/mongo/db/repl/collection_cloner_test.cpp
@@ -405,8 +405,8 @@ TEST_F(CollectionClonerTest, ListIndexesReturnedNamespaceNotFound) {
bool writesAreReplicatedOnOpCtx = false;
NamespaceString collNss;
storageInterface->createCollFn = [&collNss, &collectionCreated, &writesAreReplicatedOnOpCtx](
- OperationContext* txn, const NamespaceString& nss, const CollectionOptions& options) {
- writesAreReplicatedOnOpCtx = txn->writesAreReplicated();
+ OperationContext* opCtx, const NamespaceString& nss, const CollectionOptions& options) {
+ writesAreReplicatedOnOpCtx = opCtx->writesAreReplicated();
collectionCreated = true;
collNss = nss;
return Status::OK();
@@ -458,14 +458,14 @@ TEST_F(CollectionClonerTest,
// Replace scheduleDbWork function to schedule the create collection task with an injected error
// status.
auto exec = &getExecutor();
- collectionCloner->setScheduleDbWorkFn_forTest(
- [exec](const executor::TaskExecutor::CallbackFn& workFn) {
- auto wrappedTask = [workFn](const executor::TaskExecutor::CallbackArgs& cbd) {
- workFn(executor::TaskExecutor::CallbackArgs(
- cbd.executor, cbd.myHandle, Status(ErrorCodes::CallbackCanceled, ""), cbd.txn));
- };
- return exec->scheduleWork(wrappedTask);
- });
+ collectionCloner->setScheduleDbWorkFn_forTest([exec](
+ const executor::TaskExecutor::CallbackFn& workFn) {
+ auto wrappedTask = [workFn](const executor::TaskExecutor::CallbackArgs& cbd) {
+ workFn(executor::TaskExecutor::CallbackArgs(
+ cbd.executor, cbd.myHandle, Status(ErrorCodes::CallbackCanceled, ""), cbd.opCtx));
+ };
+ return exec->scheduleWork(wrappedTask);
+ });
bool collectionCreated = false;
storageInterface->createCollFn = [&collectionCreated](
diff --git a/src/mongo/db/repl/data_replicator.cpp b/src/mongo/db/repl/data_replicator.cpp
index ee990aab967..92e64cbb993 100644
--- a/src/mongo/db/repl/data_replicator.cpp
+++ b/src/mongo/db/repl/data_replicator.cpp
@@ -252,9 +252,9 @@ bool DataReplicator::_isActive_inlock() const {
return State::kRunning == _state || State::kShuttingDown == _state;
}
-Status DataReplicator::startup(OperationContext* txn,
+Status DataReplicator::startup(OperationContext* opCtx,
std::uint32_t initialSyncMaxAttempts) noexcept {
- invariant(txn);
+ invariant(opCtx);
invariant(initialSyncMaxAttempts >= 1U);
stdx::lock_guard<stdx::mutex> lock(_mutex);
@@ -270,7 +270,7 @@ Status DataReplicator::startup(OperationContext* txn,
return Status(ErrorCodes::ShutdownInProgress, "data replicator completed");
}
- _setUp_inlock(txn, initialSyncMaxAttempts);
+ _setUp_inlock(opCtx, initialSyncMaxAttempts);
// Start first initial sync attempt.
std::uint32_t initialSyncAttempt = 0;
@@ -397,32 +397,32 @@ void DataReplicator::setScheduleDbWorkFn_forTest(const CollectionCloner::Schedul
_scheduleDbWorkFn = work;
}
-void DataReplicator::_setUp_inlock(OperationContext* txn, std::uint32_t initialSyncMaxAttempts) {
+void DataReplicator::_setUp_inlock(OperationContext* opCtx, std::uint32_t initialSyncMaxAttempts) {
// This will call through to the storageInterfaceImpl to ReplicationCoordinatorImpl.
- // 'txn' is passed through from startup().
- _storage->setInitialSyncFlag(txn);
+ // 'opCtx' is passed through from startup().
+ _storage->setInitialSyncFlag(opCtx);
LOG(1) << "Creating oplogBuffer.";
- _oplogBuffer = _dataReplicatorExternalState->makeInitialSyncOplogBuffer(txn);
- _oplogBuffer->startup(txn);
+ _oplogBuffer = _dataReplicatorExternalState->makeInitialSyncOplogBuffer(opCtx);
+ _oplogBuffer->startup(opCtx);
_stats.initialSyncStart = _exec->now();
_stats.maxFailedInitialSyncAttempts = initialSyncMaxAttempts;
_stats.failedInitialSyncAttempts = 0;
}
-void DataReplicator::_tearDown_inlock(OperationContext* txn,
+void DataReplicator::_tearDown_inlock(OperationContext* opCtx,
const StatusWith<OpTimeWithHash>& lastApplied) {
_stats.initialSyncEnd = _exec->now();
// This might not be necessary if we failed initial sync.
invariant(_oplogBuffer);
- _oplogBuffer->shutdown(txn);
+ _oplogBuffer->shutdown(opCtx);
if (!lastApplied.isOK()) {
return;
}
- _storage->clearInitialSyncFlag(txn);
+ _storage->clearInitialSyncFlag(opCtx);
_opts.setMyLastOptime(lastApplied.getValue().opTime);
log() << "initial sync done; took "
<< duration_cast<Seconds>(_stats.initialSyncEnd - _stats.initialSyncStart) << ".";
@@ -570,28 +570,28 @@ Status DataReplicator::_recreateOplogAndDropReplicatedDatabases() {
LOG(1) << "About to drop+create the oplog, if it exists, ns:" << _opts.localOplogNS
<< ", and drop all user databases (so that we can clone them).";
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
// We are not replicating nor validating these writes.
- UnreplicatedWritesBlock unreplicatedWritesBlock(txn.get());
+ UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx.get());
// 1.) Drop the oplog.
LOG(2) << "Dropping the existing oplog: " << _opts.localOplogNS;
- auto status = _storage->dropCollection(txn.get(), _opts.localOplogNS);
+ auto status = _storage->dropCollection(opCtx.get(), _opts.localOplogNS);
if (!status.isOK()) {
return status;
}
// 2.) Drop user databases.
LOG(2) << "Dropping user databases";
- status = _storage->dropReplicatedDatabases(txn.get());
+ status = _storage->dropReplicatedDatabases(opCtx.get());
if (!status.isOK()) {
return status;
}
// 3.) Create the oplog.
LOG(2) << "Creating the oplog: " << _opts.localOplogNS;
- return _storage->createOplog(txn.get(), _opts.localOplogNS);
+ return _storage->createOplog(opCtx.get(), _opts.localOplogNS);
}
void DataReplicator::_rollbackCheckerResetCallback(
@@ -833,12 +833,12 @@ void DataReplicator::_lastOplogEntryFetcherCallbackForStopTimestamp(
const auto& oplogSeedDoc = documents.front();
LOG(1) << "inserting oplog seed document: " << oplogSeedDoc;
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
// StorageInterface::insertDocument() has to be called outside the lock because we may
// override its behavior in tests. See DataReplicatorReturnsCallbackCanceledAndDoesNot-
// ScheduleRollbackCheckerIfShutdownAfterInsertingInsertOplogSeedDocument in
// data_replicator_test.cpp
- auto status = _storage->insertDocument(txn.get(), _opts.localOplogNS, oplogSeedDoc);
+ auto status = _storage->insertDocument(opCtx.get(), _opts.localOplogNS, oplogSeedDoc);
if (!status.isOK()) {
stdx::lock_guard<stdx::mutex> lock(_mutex);
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, status);
@@ -1048,7 +1048,7 @@ void DataReplicator::_finishInitialSyncAttempt(const StatusWith<OpTimeWithHash>&
// For example, if CollectionCloner fails while inserting documents into the
// CollectionBulkLoader, we will get here via one of CollectionCloner's TaskRunner callbacks
// which has an active OperationContext bound to the current Client. This would lead to an
- // invariant when we attempt to create a new OperationContext for _tearDown(txn).
+ // invariant when we attempt to create a new OperationContext for _tearDown(opCtx).
// To avoid this, we schedule _finishCallback against the TaskExecutor rather than calling it
// here synchronously.
@@ -1139,8 +1139,8 @@ void DataReplicator::_finishCallback(StatusWith<OpTimeWithHash> lastApplied) {
decltype(_onCompletion) onCompletion;
{
stdx::lock_guard<stdx::mutex> lock(_mutex);
- auto txn = makeOpCtx();
- _tearDown_inlock(txn.get(), lastApplied);
+ auto opCtx = makeOpCtx();
+ _tearDown_inlock(opCtx.get(), lastApplied);
invariant(_onCompletion);
std::swap(_onCompletion, onCompletion);
@@ -1395,8 +1395,8 @@ StatusWith<Operations> DataReplicator::_getNextApplierBatch_inlock() {
// * only OplogEntries from before the slaveDelay point
// * a single command OplogEntry (including index builds, which appear to be inserts)
// * consequently, commands bound the previous batch to be in a batch of their own
- auto txn = makeOpCtx();
- while (_oplogBuffer->peek(txn.get(), &op)) {
+ auto opCtx = makeOpCtx();
+ while (_oplogBuffer->peek(opCtx.get(), &op)) {
auto entry = OplogEntry(std::move(op));
// Check for oplog version change. If it is absent, its value is one.
@@ -1417,7 +1417,7 @@ StatusWith<Operations> DataReplicator::_getNextApplierBatch_inlock() {
if (ops.empty()) {
// Apply commands one-at-a-time.
ops.push_back(std::move(entry));
- invariant(_oplogBuffer->tryPop(txn.get(), &op));
+ invariant(_oplogBuffer->tryPop(opCtx.get(), &op));
dassert(SimpleBSONObjComparator::kInstance.evaluate(ops.back().raw == op));
}
@@ -1451,7 +1451,7 @@ StatusWith<Operations> DataReplicator::_getNextApplierBatch_inlock() {
// Add op to buffer.
ops.push_back(std::move(entry));
totalBytes += ops.back().raw.objsize();
- invariant(_oplogBuffer->tryPop(txn.get(), &op));
+ invariant(_oplogBuffer->tryPop(opCtx.get(), &op));
dassert(SimpleBSONObjComparator::kInstance.evaluate(ops.back().raw == op));
}
return std::move(ops);
diff --git a/src/mongo/db/repl/data_replicator.h b/src/mongo/db/repl/data_replicator.h
index 62a8f134988..e2b96a92f6a 100644
--- a/src/mongo/db/repl/data_replicator.h
+++ b/src/mongo/db/repl/data_replicator.h
@@ -186,7 +186,7 @@ public:
/**
* Starts initial sync process, with the provided number of attempts
*/
- Status startup(OperationContext* txn, std::uint32_t maxAttempts) noexcept;
+ Status startup(OperationContext* opCtx, std::uint32_t maxAttempts) noexcept;
/**
* Shuts down replication if "start" has been called, and blocks until shutdown has completed.
@@ -336,12 +336,12 @@ private:
/**
* Sets up internal state to begin initial sync.
*/
- void _setUp_inlock(OperationContext* txn, std::uint32_t initialSyncMaxAttempts);
+ void _setUp_inlock(OperationContext* opCtx, std::uint32_t initialSyncMaxAttempts);
/**
* Tears down internal state before reporting final status to caller.
*/
- void _tearDown_inlock(OperationContext* txn, const StatusWith<OpTimeWithHash>& lastApplied);
+ void _tearDown_inlock(OperationContext* opCtx, const StatusWith<OpTimeWithHash>& lastApplied);
/**
* Callback to start a single initial sync attempt.
diff --git a/src/mongo/db/repl/data_replicator_external_state.h b/src/mongo/db/repl/data_replicator_external_state.h
index 0102ddab533..a1b4bf61e87 100644
--- a/src/mongo/db/repl/data_replicator_external_state.h
+++ b/src/mongo/db/repl/data_replicator_external_state.h
@@ -110,13 +110,13 @@ public:
* This function creates an oplog buffer of the type specified at server startup.
*/
virtual std::unique_ptr<OplogBuffer> makeInitialSyncOplogBuffer(
- OperationContext* txn) const = 0;
+ OperationContext* opCtx) const = 0;
/**
* Creates an oplog buffer suitable for steady state replication.
*/
virtual std::unique_ptr<OplogBuffer> makeSteadyStateOplogBuffer(
- OperationContext* txn) const = 0;
+ OperationContext* opCtx) const = 0;
/**
* Returns the current replica set config if there is one, or an error why there isn't.
@@ -130,7 +130,7 @@ private:
*
* Used exclusively by the DataReplicator to construct a MultiApplier.
*/
- virtual StatusWith<OpTime> _multiApply(OperationContext* txn,
+ virtual StatusWith<OpTime> _multiApply(OperationContext* opCtx,
MultiApplier::Operations ops,
MultiApplier::ApplyOperationFn applyOperation) = 0;
diff --git a/src/mongo/db/repl/data_replicator_external_state_impl.cpp b/src/mongo/db/repl/data_replicator_external_state_impl.cpp
index eed85a8216e..e486cfd5278 100644
--- a/src/mongo/db/repl/data_replicator_external_state_impl.cpp
+++ b/src/mongo/db/repl/data_replicator_external_state_impl.cpp
@@ -106,13 +106,13 @@ bool DataReplicatorExternalStateImpl::shouldStopFetching(
}
std::unique_ptr<OplogBuffer> DataReplicatorExternalStateImpl::makeInitialSyncOplogBuffer(
- OperationContext* txn) const {
- return _replicationCoordinatorExternalState->makeInitialSyncOplogBuffer(txn);
+ OperationContext* opCtx) const {
+ return _replicationCoordinatorExternalState->makeInitialSyncOplogBuffer(opCtx);
}
std::unique_ptr<OplogBuffer> DataReplicatorExternalStateImpl::makeSteadyStateOplogBuffer(
- OperationContext* txn) const {
- return _replicationCoordinatorExternalState->makeSteadyStateOplogBuffer(txn);
+ OperationContext* opCtx) const {
+ return _replicationCoordinatorExternalState->makeSteadyStateOplogBuffer(opCtx);
}
StatusWith<ReplSetConfig> DataReplicatorExternalStateImpl::getCurrentConfig() const {
@@ -120,10 +120,10 @@ StatusWith<ReplSetConfig> DataReplicatorExternalStateImpl::getCurrentConfig() co
}
StatusWith<OpTime> DataReplicatorExternalStateImpl::_multiApply(
- OperationContext* txn,
+ OperationContext* opCtx,
MultiApplier::Operations ops,
MultiApplier::ApplyOperationFn applyOperation) {
- return _replicationCoordinatorExternalState->multiApply(txn, std::move(ops), applyOperation);
+ return _replicationCoordinatorExternalState->multiApply(opCtx, std::move(ops), applyOperation);
}
Status DataReplicatorExternalStateImpl::_multiSyncApply(MultiApplier::OperationPtrs* ops) {
diff --git a/src/mongo/db/repl/data_replicator_external_state_impl.h b/src/mongo/db/repl/data_replicator_external_state_impl.h
index 2c5518d7a1d..40a25bcaf67 100644
--- a/src/mongo/db/repl/data_replicator_external_state_impl.h
+++ b/src/mongo/db/repl/data_replicator_external_state_impl.h
@@ -59,14 +59,14 @@ public:
const rpc::ReplSetMetadata& replMetadata,
boost::optional<rpc::OplogQueryMetadata> oqMetadata) override;
- std::unique_ptr<OplogBuffer> makeInitialSyncOplogBuffer(OperationContext* txn) const override;
+ std::unique_ptr<OplogBuffer> makeInitialSyncOplogBuffer(OperationContext* opCtx) const override;
- std::unique_ptr<OplogBuffer> makeSteadyStateOplogBuffer(OperationContext* txn) const override;
+ std::unique_ptr<OplogBuffer> makeSteadyStateOplogBuffer(OperationContext* opCtx) const override;
StatusWith<ReplSetConfig> getCurrentConfig() const override;
private:
- StatusWith<OpTime> _multiApply(OperationContext* txn,
+ StatusWith<OpTime> _multiApply(OperationContext* opCtx,
MultiApplier::Operations ops,
MultiApplier::ApplyOperationFn applyOperation) override;
diff --git a/src/mongo/db/repl/data_replicator_external_state_mock.cpp b/src/mongo/db/repl/data_replicator_external_state_mock.cpp
index a5eb417b403..1f315aee521 100644
--- a/src/mongo/db/repl/data_replicator_external_state_mock.cpp
+++ b/src/mongo/db/repl/data_replicator_external_state_mock.cpp
@@ -81,12 +81,12 @@ bool DataReplicatorExternalStateMock::shouldStopFetching(
}
std::unique_ptr<OplogBuffer> DataReplicatorExternalStateMock::makeInitialSyncOplogBuffer(
- OperationContext* txn) const {
+ OperationContext* opCtx) const {
return stdx::make_unique<OplogBufferBlockingQueue>();
}
std::unique_ptr<OplogBuffer> DataReplicatorExternalStateMock::makeSteadyStateOplogBuffer(
- OperationContext* txn) const {
+ OperationContext* opCtx) const {
return stdx::make_unique<OplogBufferBlockingQueue>();
}
@@ -95,10 +95,10 @@ StatusWith<ReplSetConfig> DataReplicatorExternalStateMock::getCurrentConfig() co
}
StatusWith<OpTime> DataReplicatorExternalStateMock::_multiApply(
- OperationContext* txn,
+ OperationContext* opCtx,
MultiApplier::Operations ops,
MultiApplier::ApplyOperationFn applyOperation) {
- return multiApplyFn(txn, std::move(ops), applyOperation);
+ return multiApplyFn(opCtx, std::move(ops), applyOperation);
}
Status DataReplicatorExternalStateMock::_multiSyncApply(MultiApplier::OperationPtrs* ops) {
diff --git a/src/mongo/db/repl/data_replicator_external_state_mock.h b/src/mongo/db/repl/data_replicator_external_state_mock.h
index ea2943a0749..88ff0df26c5 100644
--- a/src/mongo/db/repl/data_replicator_external_state_mock.h
+++ b/src/mongo/db/repl/data_replicator_external_state_mock.h
@@ -56,9 +56,9 @@ public:
const rpc::ReplSetMetadata& replMetadata,
boost::optional<rpc::OplogQueryMetadata> oqMetadata) override;
- std::unique_ptr<OplogBuffer> makeInitialSyncOplogBuffer(OperationContext* txn) const override;
+ std::unique_ptr<OplogBuffer> makeInitialSyncOplogBuffer(OperationContext* opCtx) const override;
- std::unique_ptr<OplogBuffer> makeSteadyStateOplogBuffer(OperationContext* txn) const override;
+ std::unique_ptr<OplogBuffer> makeSteadyStateOplogBuffer(OperationContext* opCtx) const override;
StatusWith<ReplSetConfig> getCurrentConfig() const override;
@@ -97,7 +97,7 @@ public:
StatusWith<ReplSetConfig> replSetConfigResult = ReplSetConfig();
private:
- StatusWith<OpTime> _multiApply(OperationContext* txn,
+ StatusWith<OpTime> _multiApply(OperationContext* opCtx,
MultiApplier::Operations ops,
MultiApplier::ApplyOperationFn applyOperation) override;
diff --git a/src/mongo/db/repl/data_replicator_test.cpp b/src/mongo/db/repl/data_replicator_test.cpp
index 9a6efec3f83..61a5dcb8f2d 100644
--- a/src/mongo/db/repl/data_replicator_test.cpp
+++ b/src/mongo/db/repl/data_replicator_test.cpp
@@ -265,31 +265,32 @@ protected:
void setUp() override {
executor::ThreadPoolExecutorTest::setUp();
_storageInterface = stdx::make_unique<StorageInterfaceMock>();
- _storageInterface->createOplogFn = [this](OperationContext* txn,
+ _storageInterface->createOplogFn = [this](OperationContext* opCtx,
const NamespaceString& nss) {
LockGuard lock(_storageInterfaceWorkDoneMutex);
_storageInterfaceWorkDone.createOplogCalled = true;
return Status::OK();
};
_storageInterface->insertDocumentFn =
- [this](OperationContext* txn, const NamespaceString& nss, const BSONObj& doc) {
+ [this](OperationContext* opCtx, const NamespaceString& nss, const BSONObj& doc) {
LockGuard lock(_storageInterfaceWorkDoneMutex);
++_storageInterfaceWorkDone.documentsInsertedCount;
return Status::OK();
};
_storageInterface->insertDocumentsFn = [this](
- OperationContext* txn, const NamespaceString& nss, const std::vector<BSONObj>& ops) {
+ OperationContext* opCtx, const NamespaceString& nss, const std::vector<BSONObj>& ops) {
LockGuard lock(_storageInterfaceWorkDoneMutex);
_storageInterfaceWorkDone.insertedOplogEntries = true;
++_storageInterfaceWorkDone.oplogEntriesInserted;
return Status::OK();
};
- _storageInterface->dropCollFn = [this](OperationContext* txn, const NamespaceString& nss) {
+ _storageInterface->dropCollFn = [this](OperationContext* opCtx,
+ const NamespaceString& nss) {
LockGuard lock(_storageInterfaceWorkDoneMutex);
_storageInterfaceWorkDone.droppedCollections.push_back(nss.ns());
return Status::OK();
};
- _storageInterface->dropUserDBsFn = [this](OperationContext* txn) {
+ _storageInterface->dropUserDBsFn = [this](OperationContext* opCtx) {
LockGuard lock(_storageInterfaceWorkDoneMutex);
_storageInterfaceWorkDone.droppedUserDBs = true;
return Status::OK();
@@ -577,66 +578,66 @@ const std::uint32_t maxAttempts = 1U;
TEST_F(DataReplicatorTest, StartupReturnsIllegalOperationIfAlreadyActive) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
ASSERT_FALSE(dr->isActive());
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
ASSERT_TRUE(dr->isActive());
- ASSERT_EQUALS(ErrorCodes::IllegalOperation, dr->startup(txn.get(), maxAttempts));
+ ASSERT_EQUALS(ErrorCodes::IllegalOperation, dr->startup(opCtx.get(), maxAttempts));
ASSERT_TRUE(dr->isActive());
}
TEST_F(DataReplicatorTest, StartupReturnsShutdownInProgressIfDataReplicatorIsShuttingDown) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
ASSERT_FALSE(dr->isActive());
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
ASSERT_TRUE(dr->isActive());
// SyncSourceSelector returns an invalid sync source so DataReplicator is stuck waiting for
// another sync source in 'Options::syncSourceRetryWait' ms.
ASSERT_OK(dr->shutdown());
- ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, dr->startup(txn.get(), maxAttempts));
+ ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, dr->startup(opCtx.get(), maxAttempts));
}
TEST_F(DataReplicatorTest, StartupReturnsShutdownInProgressIfExecutorIsShutdown) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
getExecutor().shutdown();
- ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, dr->startup(txn.get(), maxAttempts));
+ ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, dr->startup(opCtx.get(), maxAttempts));
ASSERT_FALSE(dr->isActive());
// Cannot startup data replicator again since it's in the Complete state.
- ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, dr->startup(txn.get(), maxAttempts));
+ ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, dr->startup(opCtx.get(), maxAttempts));
}
TEST_F(DataReplicatorTest, ShutdownTransitionsStateToCompleteIfCalledBeforeStartup) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
ASSERT_OK(dr->shutdown());
- ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, dr->startup(txn.get(), maxAttempts));
+ ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, dr->startup(opCtx.get(), maxAttempts));
// Data replicator is inactive when it's in the Complete state.
ASSERT_FALSE(dr->isActive());
}
TEST_F(DataReplicatorTest, StartupSetsInitialSyncFlagOnSuccess) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
// Initial sync flag should not be set before starting.
- ASSERT_FALSE(getStorage().getInitialSyncFlag(txn.get()));
+ ASSERT_FALSE(getStorage().getInitialSyncFlag(opCtx.get()));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
ASSERT_TRUE(dr->isActive());
// Initial sync flag should be set.
- ASSERT_TRUE(getStorage().getInitialSyncFlag(txn.get()));
+ ASSERT_TRUE(getStorage().getInitialSyncFlag(opCtx.get()));
}
TEST_F(DataReplicatorTest, DataReplicatorReturnsCallbackCanceledIfShutdownImmediatelyAfterStartup) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
// This will cancel the _startInitialSyncAttemptCallback() task scheduled by startup().
ASSERT_OK(dr->shutdown());
@@ -655,13 +656,13 @@ TEST_F(DataReplicatorTest, DataReplicatorReturnsCallbackCanceledIfShutdownImmedi
TEST_F(DataReplicatorTest,
DataReplicatorRetriesSyncSourceSelectionIfChooseNewSyncSourceReturnsInvalidSyncSource) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
// Override chooseNewSyncSource() result in SyncSourceSelectorMock before calling startup()
// because DataReplicator will look for a valid sync source immediately after startup.
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort());
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
// Run first sync source selection attempt.
executor::NetworkInterfaceMock::InNetworkGuard(getNet())->runReadyNetworkOperations();
@@ -697,13 +698,13 @@ TEST_F(
DataReplicatorTest,
DataReplicatorReturnsInitialSyncOplogSourceMissingIfNoValidSyncSourceCanBeFoundAfterTenFailedChooseSyncSourceAttempts) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
// Override chooseNewSyncSource() result in SyncSourceSelectorMock before calling startup()
// because DataReplicator will look for a valid sync source immediately after startup.
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort());
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
_simulateChooseSyncSourceFailure(getNet(), _options.syncSourceRetryWait);
@@ -718,12 +719,12 @@ TEST_F(
TEST_F(DataReplicatorTest,
DataReplicatorRetriesInitialSyncUpToMaxAttemptsAndReturnsLastAttemptError) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort());
const std::uint32_t initialSyncMaxAttempts = 3U;
- ASSERT_OK(dr->startup(txn.get(), initialSyncMaxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), initialSyncMaxAttempts));
auto net = getNet();
for (std::uint32_t i = 0; i < initialSyncMaxAttempts; ++i) {
@@ -748,10 +749,10 @@ TEST_F(DataReplicatorTest,
TEST_F(DataReplicatorTest,
DataReplicatorReturnsCallbackCanceledIfShutdownWhileRetryingSyncSourceSelection) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort());
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
{
@@ -774,11 +775,11 @@ TEST_F(
DataReplicatorTest,
DataReplicatorReturnsScheduleErrorIfTaskExecutorFailsToScheduleNextChooseSyncSourceCallback) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort());
_executorProxy->shouldFailScheduleWorkAt = true;
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
dr->join();
@@ -788,13 +789,13 @@ TEST_F(
TEST_F(DataReplicatorTest,
DataReplicatorReturnsScheduleErrorIfTaskExecutorFailsToScheduleNextInitialSyncAttempt) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort());
ASSERT_EQUALS(DataReplicator::State::kPreStart, dr->getState_forTest());
- ASSERT_OK(dr->startup(txn.get(), 2U));
+ ASSERT_OK(dr->startup(opCtx.get(), 2U));
ASSERT_EQUALS(DataReplicator::State::kRunning, dr->getState_forTest());
// Advance clock so that we run all but the last sync source callback.
@@ -816,7 +817,7 @@ TEST_F(DataReplicatorTest,
// the completion callback function throws an exception.
TEST_F(DataReplicatorTest, DataReplicatorTransitionsToCompleteWhenFinishCallbackThrowsException) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_onCompletion = [this](const StatusWith<OpTimeWithHash>& lastApplied) {
_lastApplied = lastApplied;
@@ -824,7 +825,7 @@ TEST_F(DataReplicatorTest, DataReplicatorTransitionsToCompleteWhenFinishCallback
};
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort());
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
ASSERT_OK(dr->shutdown());
dr->join();
@@ -862,9 +863,9 @@ TEST_F(DataReplicatorTest, DataReplicatorResetsOnCompletionCallbackFunctionPoint
});
ON_BLOCK_EXIT([this]() { getExecutor().shutdown(); });
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
sharedCallbackData.reset();
ASSERT_FALSE(sharedCallbackStateDestroyed);
@@ -891,17 +892,17 @@ TEST_F(DataReplicatorTest, DataReplicatorRecreatesOplogAndDropsReplicatedDatabas
// We are not interested in proceeding beyond the oplog creation stage so we inject a failure
// after setting '_storageInterfaceWorkDone.createOplogCalled' to true.
auto oldCreateOplogFn = _storageInterface->createOplogFn;
- _storageInterface->createOplogFn = [oldCreateOplogFn](OperationContext* txn,
+ _storageInterface->createOplogFn = [oldCreateOplogFn](OperationContext* opCtx,
const NamespaceString& nss) {
- oldCreateOplogFn(txn, nss);
+ oldCreateOplogFn(opCtx, nss);
return Status(ErrorCodes::OperationFailed, "oplog creation failed");
};
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
dr->join();
ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
@@ -913,7 +914,7 @@ TEST_F(DataReplicatorTest, DataReplicatorRecreatesOplogAndDropsReplicatedDatabas
TEST_F(DataReplicatorTest, DataReplicatorPassesThroughGetRollbackIdScheduleError) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
// replSetGetRBID is the first remote command to be scheduled by the data replicator after
// creating the oplog collection.
@@ -925,7 +926,7 @@ TEST_F(DataReplicatorTest, DataReplicatorPassesThroughGetRollbackIdScheduleError
HostAndPort syncSource("localhost", 12345);
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(syncSource);
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
dr->join();
ASSERT_EQUALS(ErrorCodes::OperationFailed, _lastApplied);
@@ -942,18 +943,18 @@ TEST_F(
// down before returning from createOplog() to make the scheduleRemoteCommand() call for
// replSetGetRBID fail.
auto oldCreateOplogFn = _storageInterface->createOplogFn;
- _storageInterface->createOplogFn = [oldCreateOplogFn, this](OperationContext* txn,
+ _storageInterface->createOplogFn = [oldCreateOplogFn, this](OperationContext* opCtx,
const NamespaceString& nss) {
- auto status = oldCreateOplogFn(txn, nss);
+ auto status = oldCreateOplogFn(opCtx, nss);
getExecutor().shutdown();
return status;
};
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
dr->join();
ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, _lastApplied);
@@ -964,14 +965,14 @@ TEST_F(
TEST_F(DataReplicatorTest, DataReplicatorCancelsRollbackCheckerOnShutdown) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
HostAndPort syncSource("localhost", 12345);
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(syncSource);
ASSERT_EQUALS(DataReplicator::State::kPreStart, dr->getState_forTest());
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
ASSERT_EQUALS(DataReplicator::State::kRunning, dr->getState_forTest());
auto net = getNet();
@@ -1000,10 +1001,10 @@ TEST_F(DataReplicatorTest, DataReplicatorCancelsRollbackCheckerOnShutdown) {
TEST_F(DataReplicatorTest, DataReplicatorPassesThroughRollbackCheckerCallbackError) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
{
@@ -1021,7 +1022,7 @@ TEST_F(DataReplicatorTest, DataReplicatorPassesThroughRollbackCheckerCallbackErr
TEST_F(DataReplicatorTest, DataReplicatorPassesThroughLastOplogEntryFetcherScheduleError) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
// The last oplog entry fetcher is the first component that sends a find command so we reject
// any find commands and save the request for inspection at the end of this test case.
@@ -1033,7 +1034,7 @@ TEST_F(DataReplicatorTest, DataReplicatorPassesThroughLastOplogEntryFetcherSched
HostAndPort syncSource("localhost", 12345);
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(syncSource);
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
{
@@ -1056,10 +1057,10 @@ TEST_F(DataReplicatorTest, DataReplicatorPassesThroughLastOplogEntryFetcherSched
TEST_F(DataReplicatorTest, DataReplicatorPassesThroughLastOplogEntryFetcherCallbackError) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
{
@@ -1082,10 +1083,10 @@ TEST_F(DataReplicatorTest, DataReplicatorPassesThroughLastOplogEntryFetcherCallb
TEST_F(DataReplicatorTest, DataReplicatorCancelsLastOplogEntryFetcherOnShutdown) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
{
@@ -1108,10 +1109,10 @@ TEST_F(DataReplicatorTest, DataReplicatorCancelsLastOplogEntryFetcherOnShutdown)
TEST_F(DataReplicatorTest,
DataReplicatorReturnsNoMatchingDocumentIfLastOplogEntryFetcherReturnsEmptyBatchOfDocuments) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
{
@@ -1132,10 +1133,10 @@ TEST_F(DataReplicatorTest,
TEST_F(DataReplicatorTest,
DataReplicatorResendsFindCommandIfLastOplogEntryFetcherReturnsRetriableError) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
executor::NetworkInterfaceMock::InNetworkGuard guard(net);
@@ -1159,10 +1160,10 @@ TEST_F(DataReplicatorTest,
TEST_F(DataReplicatorTest,
DataReplicatorReturnsNoSuchKeyIfLastOplogEntryFetcherReturnsEntryWithMissingHash) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
{
@@ -1183,10 +1184,10 @@ TEST_F(DataReplicatorTest,
TEST_F(DataReplicatorTest,
DataReplicatorReturnsNoSuchKeyIfLastOplogEntryFetcherReturnsEntryWithMissingTimestamp) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
{
@@ -1207,12 +1208,12 @@ TEST_F(DataReplicatorTest,
TEST_F(DataReplicatorTest,
DataReplicatorPassesThroughErrorFromDataReplicatorExternalStateGetCurrentConfig) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
getExternalState()->replSetConfigResult = Status(ErrorCodes::OperationFailed, "");
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
{
@@ -1232,7 +1233,7 @@ TEST_F(DataReplicatorTest,
TEST_F(DataReplicatorTest, DataReplicatorPassesThroughOplogFetcherScheduleError) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
// Make the tailable oplog query fail. Allow all other requests to be scheduled.
executor::RemoteCommandRequest request;
@@ -1247,7 +1248,7 @@ TEST_F(DataReplicatorTest, DataReplicatorPassesThroughOplogFetcherScheduleError)
HostAndPort syncSource("localhost", 12345);
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(syncSource);
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
{
@@ -1273,10 +1274,10 @@ TEST_F(DataReplicatorTest, DataReplicatorPassesThroughOplogFetcherScheduleError)
TEST_F(DataReplicatorTest, DataReplicatorPassesThroughOplogFetcherCallbackError) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
{
@@ -1311,10 +1312,10 @@ TEST_F(DataReplicatorTest, DataReplicatorPassesThroughOplogFetcherCallbackError)
TEST_F(DataReplicatorTest,
DataReplicatorSucceedsOnEarlyOplogFetcherCompletionIfThereAreNoOperationsToApply) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
{
@@ -1363,10 +1364,10 @@ TEST_F(
DataReplicatorTest,
DataReplicatorSucceedsOnEarlyOplogFetcherCompletionIfThereAreEnoughOperationsInTheOplogBufferToReachEndTimestamp) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
{
@@ -1412,10 +1413,10 @@ TEST_F(
DataReplicatorTest,
DataReplicatorReturnsRemoteResultsUnavailableOnEarlyOplogFetcherCompletionIfThereAreNotEnoughOperationsInTheOplogBufferToReachEndTimestamp) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
{
@@ -1456,7 +1457,7 @@ TEST_F(
TEST_F(DataReplicatorTest,
DataReplicatorPassesThroughDatabasesClonerScheduleErrorAndCancelsOplogFetcher) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
// Make the listDatabases command fail. Allow all other requests to be scheduled.
executor::RemoteCommandRequest request;
@@ -1470,7 +1471,7 @@ TEST_F(DataReplicatorTest,
HostAndPort syncSource("localhost", 12345);
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(syncSource);
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
{
@@ -1504,10 +1505,10 @@ TEST_F(DataReplicatorTest,
TEST_F(DataReplicatorTest,
DataReplicatorPassesThroughDatabasesClonerCallbackErrorAndCancelsOplogFetcher) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
{
@@ -1544,10 +1545,10 @@ TEST_F(DataReplicatorTest,
TEST_F(DataReplicatorTest, DataReplicatorIgnoresLocalDatabasesWhenCloningDatabases) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
{
@@ -1604,10 +1605,10 @@ TEST_F(DataReplicatorTest, DataReplicatorIgnoresLocalDatabasesWhenCloningDatabas
TEST_F(DataReplicatorTest,
DataReplicatorIgnoresDatabaseInfoDocumentWithoutNameFieldWhenCloningDatabases) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
{
@@ -1670,10 +1671,10 @@ TEST_F(DataReplicatorTest,
TEST_F(DataReplicatorTest, DataReplicatorCancelsBothOplogFetcherAndDatabasesClonerOnShutdown) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
{
@@ -1697,7 +1698,7 @@ TEST_F(DataReplicatorTest, DataReplicatorCancelsBothOplogFetcherAndDatabasesClon
TEST_F(DataReplicatorTest,
DataReplicatorPassesThroughSecondLastOplogEntryFetcherScheduleErrorAndCancelsOplogFetcher) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
// Make the second last oplog entry fetcher command fail. Allow all other requests to be
// scheduled.
@@ -1718,7 +1719,7 @@ TEST_F(DataReplicatorTest,
};
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
{
@@ -1758,10 +1759,10 @@ TEST_F(DataReplicatorTest,
TEST_F(DataReplicatorTest,
DataReplicatorPassesThroughSecondLastOplogEntryFetcherCallbackErrorAndCancelsOplogFetcher) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
{
@@ -1810,10 +1811,10 @@ TEST_F(DataReplicatorTest,
TEST_F(DataReplicatorTest,
DataReplicatorCancelsBothSecondLastOplogEntryFetcherAndOplogFetcherOnShutdown) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
{
@@ -1857,10 +1858,10 @@ TEST_F(DataReplicatorTest,
TEST_F(DataReplicatorTest,
DataReplicatorCancelsSecondLastOplogEntryFetcherOnOplogFetcherCallbackError) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
{
@@ -1914,10 +1915,10 @@ TEST_F(
DataReplicatorTest,
DataReplicatorReturnsTypeMismatchErrorWhenSecondLastOplogEntryFetcherReturnsMalformedDocument) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto oplogEntry = makeOplogEntry(1);
auto net = getNet();
@@ -1963,10 +1964,10 @@ TEST_F(
TEST_F(DataReplicatorTest,
DataReplicatorReturnsOplogOutOfOrderIfStopTimestampPrecedesBeginTimestamp) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
{
@@ -2010,7 +2011,7 @@ TEST_F(
DataReplicatorTest,
DataReplicatorPassesThroughInsertOplogSeedDocumentErrorAfterDataCloningFinishesWithNoOperationsToApply) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
NamespaceString insertDocumentNss;
BSONObj insertDocumentDoc;
@@ -2022,7 +2023,7 @@ TEST_F(
};
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto oplogEntry = makeOplogEntry(1);
auto net = getNet();
@@ -2069,7 +2070,7 @@ TEST_F(
DataReplicatorTest,
DataReplicatorReturnsCallbackCanceledAndDoesNotScheduleRollbackCheckerIfShutdownAfterInsertingInsertOplogSeedDocument) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
NamespaceString insertDocumentNss;
BSONObj insertDocumentDoc;
@@ -2082,7 +2083,7 @@ TEST_F(
};
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto oplogEntry = makeOplogEntry(1);
auto net = getNet();
@@ -2129,7 +2130,7 @@ TEST_F(
DataReplicatorTest,
DataReplicatorPassesThroughRollbackCheckerScheduleErrorAfterCloningFinishesWithNoOperationsToApply) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
// Make the second replSetGetRBID command fail. Allow all other requests to be scheduled.
executor::RemoteCommandRequest request;
@@ -2147,7 +2148,7 @@ TEST_F(
};
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto oplogEntry = makeOplogEntry(1);
auto net = getNet();
@@ -2192,10 +2193,10 @@ TEST_F(
DataReplicatorTest,
DataReplicatorPassesThroughRollbackCheckerCallbackErrorAfterCloningFinishesWithNoOperationsToApply) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto oplogEntry = makeOplogEntry(1);
auto net = getNet();
@@ -2245,10 +2246,10 @@ TEST_F(
TEST_F(DataReplicatorTest, DataReplicatorCancelsLastRollbackCheckerOnShutdown) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto oplogEntry = makeOplogEntry(1);
auto net = getNet();
@@ -2299,10 +2300,10 @@ TEST_F(DataReplicatorTest, DataReplicatorCancelsLastRollbackCheckerOnShutdown) {
TEST_F(DataReplicatorTest, DataReplicatorCancelsLastRollbackCheckerOnOplogFetcherCallbackError) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto oplogEntry = makeOplogEntry(1);
auto net = getNet();
@@ -2357,10 +2358,10 @@ TEST_F(DataReplicatorTest, DataReplicatorCancelsLastRollbackCheckerOnOplogFetche
TEST_F(DataReplicatorTest,
DataReplicatorReturnsUnrecoverableRollbackErrorIfSyncSourceRolledBackAfterCloningData) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto oplogEntry = makeOplogEntry(1);
auto net = getNet();
@@ -2405,12 +2406,12 @@ TEST_F(DataReplicatorTest,
TEST_F(DataReplicatorTest, LastOpTimeShouldBeSetEvenIfNoOperationsAreAppliedAfterCloning) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
- ASSERT_TRUE(_storageInterface->getInitialSyncFlag(txn.get()));
+ ASSERT_TRUE(_storageInterface->getInitialSyncFlag(opCtx.get()));
auto oplogEntry = makeOplogEntry(1);
auto net = getNet();
@@ -2488,17 +2489,17 @@ TEST_F(DataReplicatorTest, LastOpTimeShouldBeSetEvenIfNoOperationsAreAppliedAfte
dr->join();
ASSERT_EQUALS(OplogEntry(oplogEntry).getOpTime(), unittest::assertGet(_lastApplied).opTime);
ASSERT_EQUALS(oplogEntry["h"].Long(), unittest::assertGet(_lastApplied).value);
- ASSERT_FALSE(_storageInterface->getInitialSyncFlag(txn.get()));
+ ASSERT_FALSE(_storageInterface->getInitialSyncFlag(opCtx.get()));
}
TEST_F(DataReplicatorTest, DataReplicatorPassesThroughGetNextApplierBatchScheduleError) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
- ASSERT_TRUE(_storageInterface->getInitialSyncFlag(txn.get()));
+ ASSERT_TRUE(_storageInterface->getInitialSyncFlag(opCtx.get()));
auto net = getNet();
int baseRollbackId = 1;
@@ -2547,12 +2548,12 @@ TEST_F(DataReplicatorTest, DataReplicatorPassesThroughGetNextApplierBatchSchedul
TEST_F(DataReplicatorTest, DataReplicatorPassesThroughSecondGetNextApplierBatchScheduleError) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
- ASSERT_TRUE(_storageInterface->getInitialSyncFlag(txn.get()));
+ ASSERT_TRUE(_storageInterface->getInitialSyncFlag(opCtx.get()));
auto net = getNet();
int baseRollbackId = 1;
@@ -2601,12 +2602,12 @@ TEST_F(DataReplicatorTest, DataReplicatorPassesThroughSecondGetNextApplierBatchS
TEST_F(DataReplicatorTest, DataReplicatorCancelsGetNextApplierBatchOnShutdown) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
- ASSERT_TRUE(_storageInterface->getInitialSyncFlag(txn.get()));
+ ASSERT_TRUE(_storageInterface->getInitialSyncFlag(opCtx.get()));
auto net = getNet();
int baseRollbackId = 1;
@@ -2651,12 +2652,12 @@ TEST_F(DataReplicatorTest, DataReplicatorCancelsGetNextApplierBatchOnShutdown) {
TEST_F(DataReplicatorTest, DataReplicatorPassesThroughGetNextApplierBatchInLockError) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
- ASSERT_TRUE(_storageInterface->getInitialSyncFlag(txn.get()));
+ ASSERT_TRUE(_storageInterface->getInitialSyncFlag(opCtx.get()));
// _getNextApplierBatch_inlock() returns BadValue when it gets an oplog entry with an unexpected
// version (not OplogEntry::kOplogVersion).
@@ -2715,12 +2716,12 @@ TEST_F(
DataReplicatorTest,
DataReplicatorReturnsEmptyBatchFromGetNextApplierBatchInLockIfRsSyncApplyStopFailPointIsEnabled) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
- ASSERT_TRUE(_storageInterface->getInitialSyncFlag(txn.get()));
+ ASSERT_TRUE(_storageInterface->getInitialSyncFlag(opCtx.get()));
// _getNextApplierBatch_inlock() returns BadValue when it gets an oplog entry with an unexpected
// version (not OplogEntry::kOplogVersion).
@@ -2790,12 +2791,12 @@ TEST_F(
TEST_F(DataReplicatorTest,
DataReplicatorReturnsNoSuchKeyIfApplierBatchContainsAnOplogEntryWithoutHash) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
- ASSERT_TRUE(_storageInterface->getInitialSyncFlag(txn.get()));
+ ASSERT_TRUE(_storageInterface->getInitialSyncFlag(opCtx.get()));
// This oplog entry (without a required "h" field) will be read by OplogFetcher and inserted
// into OplogBuffer to be retrieved by _getNextApplierBatch_inlock().
@@ -2848,12 +2849,12 @@ TEST_F(DataReplicatorTest,
TEST_F(DataReplicatorTest, DataReplicatorPassesThroughMultiApplierScheduleError) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
- ASSERT_TRUE(_storageInterface->getInitialSyncFlag(txn.get()));
+ ASSERT_TRUE(_storageInterface->getInitialSyncFlag(opCtx.get()));
auto net = getNet();
int baseRollbackId = 1;
@@ -2920,14 +2921,14 @@ TEST_F(DataReplicatorTest, DataReplicatorPassesThroughMultiApplierScheduleError)
TEST_F(DataReplicatorTest, DataReplicatorPassesThroughMultiApplierCallbackError) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
getExternalState()->multiApplyFn =
[](OperationContext*, const MultiApplier::Operations&, MultiApplier::ApplyOperationFn) {
return Status(ErrorCodes::OperationFailed, "multiApply failed");
};
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
int baseRollbackId = 1;
@@ -2975,10 +2976,10 @@ TEST_F(DataReplicatorTest, DataReplicatorPassesThroughMultiApplierCallbackError)
TEST_F(DataReplicatorTest, DataReplicatorCancelsGetNextApplierBatchCallbackOnOplogFetcherError) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
int baseRollbackId = 1;
@@ -3026,10 +3027,10 @@ TEST_F(DataReplicatorTest, DataReplicatorCancelsGetNextApplierBatchCallbackOnOpl
TEST_F(DataReplicatorTest,
DataReplicatorReturnsLastAppliedOnReachingStopTimestampAfterApplyingOneBatch) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto lastOp = makeOplogEntry(2);
@@ -3088,10 +3089,10 @@ TEST_F(DataReplicatorTest,
TEST_F(DataReplicatorTest,
DataReplicatorReturnsLastAppliedOnReachingStopTimestampAfterApplyingMultipleBatches) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
// To make DataReplicator apply multiple batches, we make the third and last operation a command
// so that it will go into a separate batch from the second operation. First operation is the
@@ -3186,7 +3187,7 @@ TEST_F(
DataReplicatorTest,
DataReplicatorSchedulesLastOplogEntryFetcherToGetNewStopTimestampIfMissingDocumentsHaveBeenFetchedDuringMultiInitialSyncApply) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
// Override DataReplicatorExternalState::_multiInitialSyncApply() so that it will also fetch a
// missing document.
@@ -3210,7 +3211,7 @@ TEST_F(
};
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
// Use command for third and last operation to ensure we have two batches to apply.
auto lastOp = makeOplogEntry(3, "c");
@@ -3281,7 +3282,7 @@ TEST_F(
TEST_F(DataReplicatorTest,
DataReplicatorReturnsInvalidSyncSourceWhenFailInitialSyncWithBadHostFailpointIsEnabled) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
// This fail point makes chooseSyncSourceCallback fail with an InvalidSyncSource error.
auto failPoint = getGlobalFailPointRegistry()->getFailPoint("failInitialSyncWithBadHost");
@@ -3289,7 +3290,7 @@ TEST_F(DataReplicatorTest,
ON_BLOCK_EXIT([failPoint]() { failPoint->setMode(FailPoint::off); });
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
dr->join();
ASSERT_EQUALS(ErrorCodes::InvalidSyncSource, _lastApplied);
@@ -3297,10 +3298,10 @@ TEST_F(DataReplicatorTest,
TEST_F(DataReplicatorTest, OplogOutOfOrderOnOplogFetchFinish) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 12345));
- ASSERT_OK(dr->startup(txn.get(), maxAttempts));
+ ASSERT_OK(dr->startup(opCtx.get(), maxAttempts));
auto net = getNet();
int baseRollbackId = 1;
@@ -3346,10 +3347,10 @@ TEST_F(DataReplicatorTest, OplogOutOfOrderOnOplogFetchFinish) {
TEST_F(DataReplicatorTest, GetInitialSyncProgressReturnsCorrectProgress) {
auto dr = &getDR();
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 27017));
- ASSERT_OK(dr->startup(txn.get(), 2U));
+ ASSERT_OK(dr->startup(opCtx.get(), 2U));
auto net = getNet();
int baseRollbackId = 1;
diff --git a/src/mongo/db/repl/database_task.cpp b/src/mongo/db/repl/database_task.cpp
index b19bf201b5d..5c4f9422ea9 100644
--- a/src/mongo/db/repl/database_task.cpp
+++ b/src/mongo/db/repl/database_task.cpp
@@ -41,16 +41,16 @@ namespace repl {
// static
DatabaseTask::Task DatabaseTask::makeGlobalExclusiveLockTask(const Task& task) {
invariant(task);
- DatabaseTask::Task newTask = [task](OperationContext* txn, const Status& status) {
+ DatabaseTask::Task newTask = [task](OperationContext* opCtx, const Status& status) {
if (!status.isOK()) {
- return task(txn, status);
+ return task(opCtx, status);
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lock(txn->lockState());
- return task(txn, status);
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lock(opCtx->lockState());
+ return task(opCtx, status);
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "globalExclusiveLockTask", "global");
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "globalExclusiveLockTask", "global");
MONGO_UNREACHABLE;
};
return newTask;
@@ -61,17 +61,17 @@ DatabaseTask::Task DatabaseTask::makeDatabaseLockTask(const Task& task,
const std::string& databaseName,
LockMode mode) {
invariant(task);
- DatabaseTask::Task newTask = [=](OperationContext* txn, const Status& status) {
+ DatabaseTask::Task newTask = [=](OperationContext* opCtx, const Status& status) {
if (!status.isOK()) {
- return task(txn, status);
+ return task(opCtx, status);
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
LockMode permissiveLockMode = isSharedLockMode(mode) ? MODE_IS : MODE_IX;
- ScopedTransaction transaction(txn, permissiveLockMode);
- Lock::DBLock lock(txn->lockState(), databaseName, mode);
- return task(txn, status);
+ ScopedTransaction transaction(opCtx, permissiveLockMode);
+ Lock::DBLock lock(opCtx->lockState(), databaseName, mode);
+ return task(opCtx, status);
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "databaseLockTask", databaseName);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "databaseLockTask", databaseName);
MONGO_UNREACHABLE;
};
return newTask;
@@ -82,18 +82,18 @@ DatabaseTask::Task DatabaseTask::makeCollectionLockTask(const Task& task,
const NamespaceString& nss,
LockMode mode) {
invariant(task);
- DatabaseTask::Task newTask = [=](OperationContext* txn, const Status& status) {
+ DatabaseTask::Task newTask = [=](OperationContext* opCtx, const Status& status) {
if (!status.isOK()) {
- return task(txn, status);
+ return task(opCtx, status);
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
LockMode permissiveLockMode = isSharedLockMode(mode) ? MODE_IS : MODE_IX;
- ScopedTransaction transaction(txn, permissiveLockMode);
- Lock::DBLock lock(txn->lockState(), nss.db(), permissiveLockMode);
- Lock::CollectionLock collectionLock(txn->lockState(), nss.toString(), mode);
- return task(txn, status);
+ ScopedTransaction transaction(opCtx, permissiveLockMode);
+ Lock::DBLock lock(opCtx->lockState(), nss.db(), permissiveLockMode);
+ Lock::CollectionLock collectionLock(opCtx->lockState(), nss.toString(), mode);
+ return task(opCtx, status);
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "collectionLockTask", nss.toString());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "collectionLockTask", nss.toString());
MONGO_UNREACHABLE;
};
return newTask;
diff --git a/src/mongo/db/repl/database_task_test.cpp b/src/mongo/db/repl/database_task_test.cpp
index 5c004c466d7..beaa896acf0 100644
--- a/src/mongo/db/repl/database_task_test.cpp
+++ b/src/mongo/db/repl/database_task_test.cpp
@@ -49,8 +49,8 @@ class DatabaseTaskTest : public TaskRunnerTest {};
TEST_F(DatabaseTaskTest, TaskRunnerErrorStatus) {
// Should not attempt to acquire lock on error status from task runner.
- auto task = [](OperationContext* txn, const Status& status) {
- ASSERT_FALSE(txn);
+ auto task = [](OperationContext* opCtx, const Status& status) {
+ ASSERT_FALSE(opCtx);
ASSERT_EQUALS(ErrorCodes::BadValue, status.code());
return TaskRunner::NextAction::kInvalid;
};
@@ -66,15 +66,15 @@ TEST_F(DatabaseTaskTest, TaskRunnerErrorStatus) {
TEST_F(DatabaseTaskTest, RunGlobalExclusiveLockTask) {
stdx::mutex mutex;
bool called = false;
- OperationContext* txn = nullptr;
+ OperationContext* opCtx = nullptr;
bool lockIsW = false;
Status status = getDetectableErrorStatus();
// Task returning 'void' implies NextAction::NoAction.
auto task = [&](OperationContext* theTxn, const Status& theStatus) {
stdx::lock_guard<stdx::mutex> lk(mutex);
called = true;
- txn = theTxn;
- lockIsW = txn->lockState()->isW();
+ opCtx = theTxn;
+ lockIsW = opCtx->lockState()->isW();
status = theStatus;
return TaskRunner::NextAction::kCancel;
};
@@ -84,7 +84,7 @@ TEST_F(DatabaseTaskTest, RunGlobalExclusiveLockTask) {
stdx::lock_guard<stdx::mutex> lk(mutex);
ASSERT_TRUE(called);
- ASSERT(txn);
+ ASSERT(opCtx);
ASSERT_TRUE(lockIsW);
ASSERT_OK(status);
}
@@ -92,15 +92,15 @@ TEST_F(DatabaseTaskTest, RunGlobalExclusiveLockTask) {
void _testRunDatabaseLockTask(DatabaseTaskTest& test, LockMode mode) {
stdx::mutex mutex;
bool called = false;
- OperationContext* txn = nullptr;
+ OperationContext* opCtx = nullptr;
bool isDatabaseLockedForMode = false;
Status status = test.getDetectableErrorStatus();
// Task returning 'void' implies NextAction::NoAction.
auto task = [&](OperationContext* theTxn, const Status& theStatus) {
stdx::lock_guard<stdx::mutex> lk(mutex);
called = true;
- txn = theTxn;
- isDatabaseLockedForMode = txn->lockState()->isDbLockedForMode(databaseName, mode);
+ opCtx = theTxn;
+ isDatabaseLockedForMode = opCtx->lockState()->isDbLockedForMode(databaseName, mode);
status = theStatus;
return TaskRunner::NextAction::kCancel;
};
@@ -110,7 +110,7 @@ void _testRunDatabaseLockTask(DatabaseTaskTest& test, LockMode mode) {
stdx::lock_guard<stdx::mutex> lk(mutex);
ASSERT_TRUE(called);
- ASSERT(txn);
+ ASSERT(opCtx);
ASSERT_TRUE(isDatabaseLockedForMode);
ASSERT_OK(status);
}
@@ -134,16 +134,16 @@ TEST_F(DatabaseTaskTest, RunDatabaseLockTaskModeIS) {
void _testRunCollectionLockTask(DatabaseTaskTest& test, LockMode mode) {
stdx::mutex mutex;
bool called = false;
- OperationContext* txn = nullptr;
+ OperationContext* opCtx = nullptr;
bool isCollectionLockedForMode = false;
Status status = test.getDetectableErrorStatus();
// Task returning 'void' implies NextAction::NoAction.
auto task = [&](OperationContext* theTxn, const Status& theStatus) {
stdx::lock_guard<stdx::mutex> lk(mutex);
called = true;
- txn = theTxn;
+ opCtx = theTxn;
isCollectionLockedForMode =
- txn->lockState()->isCollectionLockedForMode(nss.toString(), mode);
+ opCtx->lockState()->isCollectionLockedForMode(nss.toString(), mode);
status = theStatus;
return TaskRunner::NextAction::kCancel;
};
@@ -153,7 +153,7 @@ void _testRunCollectionLockTask(DatabaseTaskTest& test, LockMode mode) {
stdx::lock_guard<stdx::mutex> lk(mutex);
ASSERT_TRUE(called);
- ASSERT(txn);
+ ASSERT(opCtx);
ASSERT_TRUE(isCollectionLockedForMode);
ASSERT_OK(status);
}
diff --git a/src/mongo/db/repl/databases_cloner.cpp b/src/mongo/db/repl/databases_cloner.cpp
index 3f7c1ce11e4..ff0c9170e15 100644
--- a/src/mongo/db/repl/databases_cloner.cpp
+++ b/src/mongo/db/repl/databases_cloner.cpp
@@ -354,13 +354,13 @@ void DatabasesCloner::_onEachDBCloneFinish(const Status& status, const std::stri
auto adminStatus = Status(ErrorCodes::NotYetInitialized, "");
{
// TODO: Move isAdminDbValid() out of the collection/database cloner code paths.
- OperationContext* txn = cc().getOperationContext();
- ServiceContext::UniqueOperationContext txnPtr;
- if (!txn) {
- txnPtr = cc().makeOperationContext();
- txn = txnPtr.get();
+ OperationContext* opCtx = cc().getOperationContext();
+ ServiceContext::UniqueOperationContext opCtxPtr;
+ if (!opCtx) {
+ opCtxPtr = cc().makeOperationContext();
+ opCtx = opCtxPtr.get();
}
- adminStatus = _storage->isAdminDbValid(txn);
+ adminStatus = _storage->isAdminDbValid(opCtx);
}
if (!adminStatus.isOK()) {
LOG(1) << "Validation failed on 'admin' db due to " << adminStatus;
diff --git a/src/mongo/db/repl/databases_cloner_test.cpp b/src/mongo/db/repl/databases_cloner_test.cpp
index 366e8c04a7c..777a4eedacd 100644
--- a/src/mongo/db/repl/databases_cloner_test.cpp
+++ b/src/mongo/db/repl/databases_cloner_test.cpp
@@ -142,27 +142,27 @@ protected:
executor::ThreadPoolExecutorTest::setUp();
launchExecutorThread();
- _storageInterface.createOplogFn = [this](OperationContext* txn,
+ _storageInterface.createOplogFn = [this](OperationContext* opCtx,
const NamespaceString& nss) {
_storageInterfaceWorkDone.createOplogCalled = true;
return Status::OK();
};
_storageInterface.insertDocumentFn =
- [this](OperationContext* txn, const NamespaceString& nss, const BSONObj& doc) {
+ [this](OperationContext* opCtx, const NamespaceString& nss, const BSONObj& doc) {
++_storageInterfaceWorkDone.documentsInsertedCount;
return Status::OK();
};
_storageInterface.insertDocumentsFn = [this](
- OperationContext* txn, const NamespaceString& nss, const std::vector<BSONObj>& ops) {
+ OperationContext* opCtx, const NamespaceString& nss, const std::vector<BSONObj>& ops) {
_storageInterfaceWorkDone.insertedOplogEntries = true;
++_storageInterfaceWorkDone.oplogEntriesInserted;
return Status::OK();
};
- _storageInterface.dropCollFn = [this](OperationContext* txn, const NamespaceString& nss) {
+ _storageInterface.dropCollFn = [this](OperationContext* opCtx, const NamespaceString& nss) {
_storageInterfaceWorkDone.droppedCollections.push_back(nss.ns());
return Status::OK();
};
- _storageInterface.dropUserDBsFn = [this](OperationContext* txn) {
+ _storageInterface.dropUserDBsFn = [this](OperationContext* opCtx) {
_storageInterfaceWorkDone.droppedUserDBs = true;
return Status::OK();
};
@@ -728,9 +728,9 @@ TEST_F(DBsClonerTest, DatabaseClonerChecksAdminDbUsingStorageInterfaceAfterCopyi
bool isAdminDbValidFnCalled = false;
OperationContext* isAdminDbValidFnOpCtx = nullptr;
_storageInterface.isAdminDbValidFn = [&isAdminDbValidFnCalled,
- &isAdminDbValidFnOpCtx](OperationContext* txn) {
+ &isAdminDbValidFnOpCtx](OperationContext* opCtx) {
isAdminDbValidFnCalled = true;
- isAdminDbValidFnOpCtx = txn;
+ isAdminDbValidFnOpCtx = opCtx;
return Status::OK();
};
@@ -770,7 +770,7 @@ TEST_F(DBsClonerTest, AdminDbValidationErrorShouldAbortTheCloner) {
Status result = getDetectableErrorStatus();
bool isAdminDbValidFnCalled = false;
- _storageInterface.isAdminDbValidFn = [&isAdminDbValidFnCalled](OperationContext* txn) {
+ _storageInterface.isAdminDbValidFn = [&isAdminDbValidFnCalled](OperationContext* opCtx) {
isAdminDbValidFnCalled = true;
return Status(ErrorCodes::OperationFailed, "admin db invalid");
};
diff --git a/src/mongo/db/repl/initial_sync.cpp b/src/mongo/db/repl/initial_sync.cpp
index 3c010185e64..ba5953f0035 100644
--- a/src/mongo/db/repl/initial_sync.cpp
+++ b/src/mongo/db/repl/initial_sync.cpp
@@ -52,26 +52,26 @@ InitialSync::~InitialSync() {}
/* initial oplog application, during initial sync, after cloning.
*/
-void InitialSync::oplogApplication(OperationContext* txn, const OpTime& endOpTime) {
+void InitialSync::oplogApplication(OperationContext* opCtx, const OpTime& endOpTime) {
if (replSetForceInitialSyncFailure > 0) {
log() << "test code invoked, forced InitialSync failure: "
<< replSetForceInitialSyncFailure;
replSetForceInitialSyncFailure--;
throw DBException("forced error", 0);
}
- _applyOplogUntil(txn, endOpTime);
+ _applyOplogUntil(opCtx, endOpTime);
}
/* applies oplog from "now" until endOpTime using the applier threads for initial sync*/
-void InitialSync::_applyOplogUntil(OperationContext* txn, const OpTime& endOpTime) {
+void InitialSync::_applyOplogUntil(OperationContext* opCtx, const OpTime& endOpTime) {
unsigned long long bytesApplied = 0;
unsigned long long entriesApplied = 0;
while (true) {
OpQueue ops;
- auto replCoord = repl::ReplicationCoordinator::get(txn);
- while (!tryPopAndWaitForMore(txn, &ops, BatchLimits{})) {
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
+ while (!tryPopAndWaitForMore(opCtx, &ops, BatchLimits{})) {
if (globalInShutdownDeprecated()) {
return;
}
@@ -108,10 +108,10 @@ void InitialSync::_applyOplogUntil(OperationContext* txn, const OpTime& endOpTim
// Tally operation information and apply batch. Don't use ops again after these lines.
bytesApplied += ops.getBytes();
entriesApplied += ops.getCount();
- const OpTime lastOpTime = multiApply(txn, ops.releaseBatch());
+ const OpTime lastOpTime = multiApply(opCtx, ops.releaseBatch());
replCoord->setMyLastAppliedOpTime(lastOpTime);
- setNewTimestamp(txn->getServiceContext(), lastOpTime.getTimestamp());
+ setNewTimestamp(opCtx->getServiceContext(), lastOpTime.getTimestamp());
if (globalInShutdownDeprecated()) {
return;
diff --git a/src/mongo/db/repl/initial_sync.h b/src/mongo/db/repl/initial_sync.h
index 167038363a7..3afce50a11d 100644
--- a/src/mongo/db/repl/initial_sync.h
+++ b/src/mongo/db/repl/initial_sync.h
@@ -47,7 +47,7 @@ public:
/**
* applies up to endOpTime, fetching missing documents as needed.
*/
- void oplogApplication(OperationContext* txn, const OpTime& endOpTime);
+ void oplogApplication(OperationContext* opCtx, const OpTime& endOpTime);
private:
/**
@@ -55,7 +55,7 @@ private:
*
* NOTE:Will not transition or check states
*/
- void _applyOplogUntil(OperationContext* txn, const OpTime& endOpTime);
+ void _applyOplogUntil(OperationContext* opCtx, const OpTime& endOpTime);
};
// Used for ReplSetTest testing.
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index 7c48ec8671c..10540c0fd3c 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -95,7 +95,7 @@ const int restartSync = 0;
const int restartSyncAfterSleep = 1;
} // namespace
-void pretouchOperation(OperationContext* txn, const BSONObj& op);
+void pretouchOperation(OperationContext* opCtx, const BSONObj& op);
void pretouchN(vector<BSONObj>&, unsigned a, unsigned b);
/* if 1 sync() is running */
@@ -114,12 +114,12 @@ struct ReplInfo {
};
-ReplSource::ReplSource(OperationContext* txn) {
+ReplSource::ReplSource(OperationContext* opCtx) {
nClonedThisPass = 0;
- ensureMe(txn);
+ ensureMe(opCtx);
}
-ReplSource::ReplSource(OperationContext* txn, BSONObj o) : nClonedThisPass(0) {
+ReplSource::ReplSource(OperationContext* opCtx, BSONObj o) : nClonedThisPass(0) {
only = o.getStringField("only");
hostName = o.getStringField("host");
_sourceName = o.getStringField("source");
@@ -155,7 +155,7 @@ ReplSource::ReplSource(OperationContext* txn, BSONObj o) : nClonedThisPass(0) {
incompleteCloneDbs.insert(e.fieldName());
}
}
- ensureMe(txn);
+ ensureMe(opCtx);
}
/* Turn our C++ Source object into a BSONObj */
@@ -189,31 +189,31 @@ BSONObj ReplSource::jsobj() {
return b.obj();
}
-void ReplSource::ensureMe(OperationContext* txn) {
+void ReplSource::ensureMe(OperationContext* opCtx) {
string myname = getHostName();
// local.me is an identifier for a server for getLastError w:2+
- bool exists = Helpers::getSingleton(txn, "local.me", _me);
+ bool exists = Helpers::getSingleton(opCtx, "local.me", _me);
if (!exists || !_me.hasField("host") || _me["host"].String() != myname) {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dblk(txn->lockState(), "local", MODE_X);
- WriteUnitOfWork wunit(txn);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dblk(opCtx->lockState(), "local", MODE_X);
+ WriteUnitOfWork wunit(opCtx);
// clean out local.me
- Helpers::emptyCollection(txn, "local.me");
+ Helpers::emptyCollection(opCtx, "local.me");
// repopulate
BSONObjBuilder b;
b.appendOID("_id", 0, true);
b.append("host", myname);
_me = b.obj();
- Helpers::putSingleton(txn, "local.me", _me);
+ Helpers::putSingleton(opCtx, "local.me", _me);
wunit.commit();
}
_me = _me.getOwned();
}
-void ReplSource::save(OperationContext* txn) {
+void ReplSource::save(OperationContext* opCtx) {
BSONObjBuilder b;
verify(!hostName.empty());
b.append("host", hostName);
@@ -226,7 +226,7 @@ void ReplSource::save(OperationContext* txn) {
LOG(1) << "Saving repl source: " << o << endl;
{
- OldClientContext ctx(txn, "local.sources", false);
+ OldClientContext ctx(opCtx, "local.sources", false);
const NamespaceString requestNs("local.sources");
UpdateRequest request(requestNs);
@@ -235,14 +235,14 @@ void ReplSource::save(OperationContext* txn) {
request.setUpdates(o);
request.setUpsert();
- UpdateResult res = update(txn, ctx.db(), request);
+ UpdateResult res = update(opCtx, ctx.db(), request);
verify(!res.modifiers);
verify(res.numMatched == 1 || !res.upserted.isEmpty());
}
}
-static void addSourceToList(OperationContext* txn,
+static void addSourceToList(OperationContext* opCtx,
ReplSource::SourceVector& v,
ReplSource& s,
ReplSource::SourceVector& old) {
@@ -263,9 +263,9 @@ static void addSourceToList(OperationContext* txn,
/* we reuse our existing objects so that we can keep our existing connection
and cursor in effect.
*/
-void ReplSource::loadAll(OperationContext* txn, SourceVector& v) {
+void ReplSource::loadAll(OperationContext* opCtx, SourceVector& v) {
const char* localSources = "local.sources";
- OldClientContext ctx(txn, localSources, false);
+ OldClientContext ctx(opCtx, localSources, false);
SourceVector old = v;
v.clear();
@@ -275,13 +275,16 @@ void ReplSource::loadAll(OperationContext* txn, SourceVector& v) {
// check that no items are in sources other than that
// add if missing
int n = 0;
- unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(
- txn, localSources, ctx.db()->getCollection(localSources), PlanExecutor::YIELD_MANUAL));
+ unique_ptr<PlanExecutor> exec(
+ InternalPlanner::collectionScan(opCtx,
+ localSources,
+ ctx.db()->getCollection(localSources),
+ PlanExecutor::YIELD_MANUAL));
BSONObj obj;
PlanExecutor::ExecState state;
while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
n++;
- ReplSource tmp(txn, obj);
+ ReplSource tmp(opCtx, obj);
if (tmp.hostName != replSettings.getSource()) {
log() << "--source " << replSettings.getSource() << " != " << tmp.hostName
<< " from local.sources collection" << endl;
@@ -303,10 +306,10 @@ void ReplSource::loadAll(OperationContext* txn, SourceVector& v) {
uassert(10002, "local.sources collection corrupt?", n < 2);
if (n == 0) {
// source missing. add.
- ReplSource s(txn);
+ ReplSource s(opCtx);
s.hostName = replSettings.getSource();
s.only = replSettings.getOnly();
- s.save(txn);
+ s.save(opCtx);
}
} else {
try {
@@ -317,41 +320,41 @@ void ReplSource::loadAll(OperationContext* txn, SourceVector& v) {
}
unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(
- txn, localSources, ctx.db()->getCollection(localSources), PlanExecutor::YIELD_MANUAL));
+ opCtx, localSources, ctx.db()->getCollection(localSources), PlanExecutor::YIELD_MANUAL));
BSONObj obj;
PlanExecutor::ExecState state;
while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
- ReplSource tmp(txn, obj);
+ ReplSource tmp(opCtx, obj);
if (tmp.syncedTo.isNull()) {
- DBDirectClient c(txn);
+ DBDirectClient c(opCtx);
BSONObj op = c.findOne("local.oplog.$main",
QUERY("op" << NE << "n").sort(BSON("$natural" << -1)));
if (!op.isEmpty()) {
tmp.syncedTo = op["ts"].timestamp();
}
}
- addSourceToList(txn, v, tmp, old);
+ addSourceToList(opCtx, v, tmp, old);
}
uassert(17066, "Internal error reading from local.sources", PlanExecutor::IS_EOF == state);
}
-bool ReplSource::throttledForceResyncDead(OperationContext* txn, const char* requester) {
+bool ReplSource::throttledForceResyncDead(OperationContext* opCtx, const char* requester) {
if (time(0) - lastForcedResync > 600) {
- forceResyncDead(txn, requester);
+ forceResyncDead(opCtx, requester);
lastForcedResync = time(0);
return true;
}
return false;
}
-void ReplSource::forceResyncDead(OperationContext* txn, const char* requester) {
+void ReplSource::forceResyncDead(OperationContext* opCtx, const char* requester) {
if (!replAllDead)
return;
SourceVector sources;
- ReplSource::loadAll(txn, sources);
+ ReplSource::loadAll(opCtx, sources);
for (SourceVector::iterator i = sources.begin(); i != sources.end(); ++i) {
log() << requester << " forcing resync from " << (*i)->hostName << endl;
- (*i)->forceResync(txn, requester);
+ (*i)->forceResync(opCtx, requester);
}
replAllDead = 0;
}
@@ -379,7 +382,7 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& ns,
BSONObj& cmdObj,
int options,
@@ -391,9 +394,9 @@ public:
return appendCommandStatus(result, status);
}
- ReplClientInfo::forClient(txn->getClient()).setRemoteID(handshake.getRid());
+ ReplClientInfo::forClient(opCtx->getClient()).setRemoteID(handshake.getRid());
- status = getGlobalReplicationCoordinator()->processHandshake(txn, handshake);
+ status = getGlobalReplicationCoordinator()->processHandshake(opCtx, handshake);
return appendCommandStatus(result, status);
}
@@ -429,12 +432,12 @@ bool ReplSource::_connect(OplogReader* reader, const HostAndPort& host, const OI
}
-void ReplSource::forceResync(OperationContext* txn, const char* requester) {
+void ReplSource::forceResync(OperationContext* opCtx, const char* requester) {
BSONObj info;
{
// This is always a GlobalWrite lock (so no ns/db used from the context)
- invariant(txn->lockState()->isW());
- Lock::TempRelease tempRelease(txn->lockState());
+ invariant(opCtx->lockState()->isW());
+ Lock::TempRelease tempRelease(opCtx->lockState());
if (!_connect(&oplogReader,
HostAndPort(hostName),
@@ -456,14 +459,14 @@ void ReplSource::forceResync(OperationContext* txn, const char* requester) {
if (!e.embeddedObject().getBoolField("empty")) {
if (name != "local") {
if (only.empty() || only == name) {
- resyncDrop(txn, name);
+ resyncDrop(opCtx, name);
}
}
}
}
syncedTo = Timestamp();
addDbNextPass.clear();
- save(txn);
+ save(opCtx);
}
Status ReplSource::_updateIfDoneWithInitialSync() {
@@ -483,23 +486,23 @@ Status ReplSource::_updateIfDoneWithInitialSync() {
return Status::OK();
}
-void ReplSource::resyncDrop(OperationContext* txn, const string& dbName) {
+void ReplSource::resyncDrop(OperationContext* opCtx, const string& dbName) {
log() << "resync: dropping database " << dbName;
- invariant(txn->lockState()->isW());
+ invariant(opCtx->lockState()->isW());
- Database* const db = dbHolder().get(txn, dbName);
+ Database* const db = dbHolder().get(opCtx, dbName);
if (!db) {
log() << "resync: dropping database " << dbName
<< " - database does not exist. nothing to do.";
return;
}
- Database::dropDatabase(txn, db);
+ Database::dropDatabase(opCtx, db);
}
/* grab initial copy of a database from the master */
-void ReplSource::resync(OperationContext* txn, const std::string& dbName) {
+void ReplSource::resync(OperationContext* opCtx, const std::string& dbName) {
const std::string db(dbName); // need local copy of the name, we're dropping the original
- resyncDrop(txn, db);
+ resyncDrop(opCtx, db);
{
log() << "resync: cloning database " << db << " to get an initial copy" << endl;
@@ -512,11 +515,11 @@ void ReplSource::resync(OperationContext* txn, const std::string& dbName) {
cloneOptions.snapshot = true;
Cloner cloner;
- Status status = cloner.copyDb(txn, db, hostName.c_str(), cloneOptions, NULL);
+ Status status = cloner.copyDb(opCtx, db, hostName.c_str(), cloneOptions, NULL);
if (!status.isOK()) {
if (status.code() == ErrorCodes::DatabaseDifferCase) {
- resyncDrop(txn, db);
+ resyncDrop(opCtx, db);
log() << "resync: database " << db
<< " not valid on the master due to a name conflict, dropping.";
return;
@@ -552,12 +555,12 @@ bool DatabaseIgnorer::ignoreAt(const string& db, const Timestamp& currentOplogTi
}
}
-bool ReplSource::handleDuplicateDbName(OperationContext* txn,
+bool ReplSource::handleDuplicateDbName(OperationContext* opCtx,
const BSONObj& op,
const char* ns,
const char* db) {
// We are already locked at this point
- if (dbHolder().get(txn, ns) != NULL) {
+ if (dbHolder().get(opCtx, ns) != NULL) {
// Database is already present.
return true;
}
@@ -577,8 +580,8 @@ bool ReplSource::handleDuplicateDbName(OperationContext* txn,
bool dbOk = false;
{
// This is always a GlobalWrite lock (so no ns/db used from the context)
- invariant(txn->lockState()->isW());
- Lock::TempRelease tempRelease(txn->lockState());
+ invariant(opCtx->lockState()->isW());
+ Lock::TempRelease tempRelease(opCtx->lockState());
// We always log an operation after executing it (never before), so
// a database list will always be valid as of an oplog entry generated
@@ -634,8 +637,8 @@ bool ReplSource::handleDuplicateDbName(OperationContext* txn,
incompleteCloneDbs.erase(*i);
addDbNextPass.erase(*i);
- AutoGetDb autoDb(txn, *i, MODE_X);
- Database::dropDatabase(txn, autoDb.getDb());
+ AutoGetDb autoDb(opCtx, *i, MODE_X);
+ Database::dropDatabase(opCtx, autoDb.getDb());
}
massert(14034,
@@ -644,16 +647,16 @@ bool ReplSource::handleDuplicateDbName(OperationContext* txn,
return true;
}
-void ReplSource::applyCommand(OperationContext* txn, const BSONObj& op) {
+void ReplSource::applyCommand(OperationContext* opCtx, const BSONObj& op) {
try {
- Status status = applyCommand_inlock(txn, op, true);
+ Status status = applyCommand_inlock(opCtx, op, true);
if (!status.isOK()) {
SyncTail sync(nullptr, SyncTail::MultiSyncApplyFunc());
sync.setHostname(hostName);
- if (sync.shouldRetry(txn, op)) {
+ if (sync.shouldRetry(opCtx, op)) {
uassert(28639,
"Failure retrying initial sync update",
- applyCommand_inlock(txn, op, true).isOK());
+ applyCommand_inlock(opCtx, op, true).isOK());
}
}
} catch (UserException& e) {
@@ -667,16 +670,16 @@ void ReplSource::applyCommand(OperationContext* txn, const BSONObj& op) {
}
}
-void ReplSource::applyOperation(OperationContext* txn, Database* db, const BSONObj& op) {
+void ReplSource::applyOperation(OperationContext* opCtx, Database* db, const BSONObj& op) {
try {
- Status status = applyOperation_inlock(txn, db, op);
+ Status status = applyOperation_inlock(opCtx, db, op);
if (!status.isOK()) {
SyncTail sync(nullptr, SyncTail::MultiSyncApplyFunc());
sync.setHostname(hostName);
- if (sync.shouldRetry(txn, op)) {
+ if (sync.shouldRetry(opCtx, op)) {
uassert(15914,
"Failure retrying initial sync update",
- applyOperation_inlock(txn, db, op).isOK());
+ applyOperation_inlock(opCtx, db, op).isOK());
}
}
} catch (UserException& e) {
@@ -697,7 +700,7 @@ void ReplSource::applyOperation(OperationContext* txn, Database* db, const BSONO
@param alreadyLocked caller already put us in write lock if true
*/
-void ReplSource::_sync_pullOpLog_applyOperation(OperationContext* txn,
+void ReplSource::_sync_pullOpLog_applyOperation(OperationContext* opCtx,
BSONObj& op,
bool alreadyLocked) {
LOG(6) << "processing op: " << redact(op) << endl;
@@ -725,10 +728,10 @@ void ReplSource::_sync_pullOpLog_applyOperation(OperationContext* txn,
if (!only.empty() && only != dbName)
return;
- // Push the CurOp stack for "txn" so each individual oplog entry application is separately
+ // Push the CurOp stack for "opCtx" so each individual oplog entry application is separately
// reported.
- CurOp individualOp(txn);
- txn->setReplicatedWrites(false);
+ CurOp individualOp(opCtx);
+ opCtx->setReplicatedWrites(false);
const ReplSettings& replSettings = getGlobalReplicationCoordinator()->getSettings();
if (replSettings.getPretouch() &&
!alreadyLocked /*doesn't make sense if in write lock already*/) {
@@ -759,16 +762,16 @@ void ReplSource::_sync_pullOpLog_applyOperation(OperationContext* txn,
a += m;
}
// we do one too...
- pretouchOperation(txn, op);
+ pretouchOperation(opCtx, op);
tp->join();
countdown = v.size();
}
} else {
- pretouchOperation(txn, op);
+ pretouchOperation(opCtx, op);
}
}
- unique_ptr<Lock::GlobalWrite> lk(alreadyLocked ? 0 : new Lock::GlobalWrite(txn->lockState()));
+ unique_ptr<Lock::GlobalWrite> lk(alreadyLocked ? 0 : new Lock::GlobalWrite(opCtx->lockState()));
if (replAllDead) {
// hmmm why is this check here and not at top of this function? does it get set between top
@@ -777,20 +780,20 @@ void ReplSource::_sync_pullOpLog_applyOperation(OperationContext* txn,
throw SyncException();
}
- if (!handleDuplicateDbName(txn, op, ns, dbName)) {
+ if (!handleDuplicateDbName(opCtx, op, ns, dbName)) {
return;
}
// special case apply for commands to avoid implicit database creation
if (*op.getStringField("op") == 'c') {
- applyCommand(txn, op);
+ applyCommand(opCtx, op);
return;
}
// This code executes on the slaves only, so it doesn't need to be sharding-aware since
// mongos will not send requests there. That's why the last argument is false (do not do
// version checking).
- OldClientContext ctx(txn, ns, false);
+ OldClientContext ctx(opCtx, ns, false);
bool empty = !ctx.db()->getDatabaseCatalogEntry()->hasUserData();
bool incompleteClone = incompleteCloneDbs.count(dbName) != 0;
@@ -813,16 +816,16 @@ void ReplSource::_sync_pullOpLog_applyOperation(OperationContext* txn,
log() << "An earlier initial clone of '" << dbName
<< "' did not complete, now resyncing." << endl;
}
- save(txn);
- OldClientContext ctx(txn, ns, false);
+ save(opCtx);
+ OldClientContext ctx(opCtx, ns, false);
nClonedThisPass++;
- resync(txn, ctx.db()->name());
+ resync(opCtx, ctx.db()->name());
addDbNextPass.erase(dbName);
incompleteCloneDbs.erase(dbName);
}
- save(txn);
+ save(opCtx);
} else {
- applyOperation(txn, ctx.db(), op);
+ applyOperation(opCtx, ctx.db(), op);
addDbNextPass.erase(dbName);
}
}
@@ -877,7 +880,7 @@ public:
0 ok, don't sleep
1 ok, sleep
*/
-int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) {
+int ReplSource::_sync_pullOpLog(OperationContext* opCtx, int& nApplied) {
int okResultCode = restartSyncAfterSleep;
string ns = string("local.oplog.$") + sourceName();
LOG(2) << "sync_pullOpLog " << ns << " syncedTo:" << syncedTo.toStringLong() << '\n';
@@ -911,9 +914,9 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) {
}
// obviously global isn't ideal, but non-repl set is old so
// keeping it simple
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- save(txn);
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
+ save(opCtx);
}
BSONObjBuilder gte;
@@ -948,7 +951,7 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) {
b.append("ns", *i + '.');
b.append("op", "db");
BSONObj op = b.done();
- _sync_pullOpLog_applyOperation(txn, op, false);
+ _sync_pullOpLog_applyOperation(opCtx, op, false);
}
}
@@ -974,9 +977,9 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) {
log() << ns << " oplog is empty" << endl;
}
{
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- save(txn);
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
+ save(opCtx);
}
return okResultCode;
}
@@ -1042,26 +1045,26 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) {
const bool moreInitialSyncsPending = !addDbNextPass.empty() && n;
if (moreInitialSyncsPending || !oplogReader.more()) {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
if (tailing) {
okResultCode = restartSync; // don't sleep
}
syncedTo = nextOpTime;
- save(txn); // note how far we are synced up to now
+ save(opCtx); // note how far we are synced up to now
nApplied = n;
break;
}
OCCASIONALLY if (n > 0 && (n > 100000 || time(0) - saveLast > 60)) {
// periodically note our progress, in case we are doing a lot of work and crash
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
syncedTo = nextOpTime;
// can't update local log ts since there are pending operations from our peer
- save(txn);
+ save(opCtx);
log() << "checkpoint applied " << n << " operations" << endl;
log() << "syncedTo: " << syncedTo.toStringLong() << endl;
saveLast = time(0);
@@ -1072,7 +1075,8 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) {
int b = replApplyBatchSize.load();
bool justOne = b == 1;
- unique_ptr<Lock::GlobalWrite> lk(justOne ? 0 : new Lock::GlobalWrite(txn->lockState()));
+ unique_ptr<Lock::GlobalWrite> lk(justOne ? 0
+ : new Lock::GlobalWrite(opCtx->lockState()));
while (1) {
BSONElement ts = op.getField("ts");
if (!(ts.type() == Date || ts.type() == bsonTimestamp)) {
@@ -1104,11 +1108,11 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) {
oplogReader.putBack(op);
_sleepAdviceTime = nextOpTime.getSecs() +
durationCount<Seconds>(replSettings.getSlaveDelaySecs()) + 1;
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
if (n > 0) {
syncedTo = last;
- save(txn);
+ save(opCtx);
}
log() << "applied " << n << " operations" << endl;
log() << "syncedTo: " << syncedTo.toStringLong() << endl;
@@ -1116,7 +1120,7 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) {
return okResultCode;
}
- _sync_pullOpLog_applyOperation(txn, op, !justOne);
+ _sync_pullOpLog_applyOperation(opCtx, op, !justOne);
n++;
if (--b == 0)
@@ -1139,7 +1143,7 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) {
returns >= 0 if ok. return -1 if you want to reconnect.
return value of zero indicates no sleep necessary before next call
*/
-int ReplSource::sync(OperationContext* txn, int& nApplied) {
+int ReplSource::sync(OperationContext* opCtx, int& nApplied) {
_sleepAdviceTime = 0;
ReplInfo r("sync");
if (!serverGlobalParams.quiet.load()) {
@@ -1167,7 +1171,7 @@ int ReplSource::sync(OperationContext* txn, int& nApplied) {
return -1;
}
- return _sync_pullOpLog(txn, nApplied);
+ return _sync_pullOpLog(opCtx, nApplied);
}
/* --------------------------------------------------------------*/
@@ -1184,12 +1188,12 @@ _ reuse that cursor when we can
0 = no sleep recommended
1 = special sentinel indicating adaptive sleep recommended
*/
-int _replMain(OperationContext* txn, ReplSource::SourceVector& sources, int& nApplied) {
+int _replMain(OperationContext* opCtx, ReplSource::SourceVector& sources, int& nApplied) {
{
ReplInfo r("replMain load sources");
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- ReplSource::loadAll(txn, sources);
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
+ ReplSource::loadAll(opCtx, sources);
// only need this param for initial reset
_replMainStarted = true;
@@ -1208,7 +1212,7 @@ int _replMain(OperationContext* txn, ReplSource::SourceVector& sources, int& nAp
ReplSource* s = i->get();
int res = forceReconnect;
try {
- res = s->sync(txn, nApplied);
+ res = s->sync(opCtx, nApplied);
bool moreToSync = s->haveMoreDbsToSync();
if (res < 0) {
sleepAdvice = 3;
@@ -1245,17 +1249,17 @@ int _replMain(OperationContext* txn, ReplSource::SourceVector& sources, int& nAp
return sleepAdvice;
}
-static void replMain(OperationContext* txn) {
+static void replMain(OperationContext* opCtx) {
ReplSource::SourceVector sources;
while (1) {
auto s = restartSync;
{
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
if (replAllDead) {
// throttledForceResyncDead can throw
if (!getGlobalReplicationCoordinator()->getSettings().isAutoResyncEnabled() ||
- !ReplSource::throttledForceResyncDead(txn, "auto")) {
+ !ReplSource::throttledForceResyncDead(opCtx, "auto")) {
log() << "all sources dead: " << replAllDead << ", sleeping for 5 seconds"
<< endl;
break;
@@ -1268,7 +1272,7 @@ static void replMain(OperationContext* txn) {
try {
int nApplied = 0;
- s = _replMain(txn, sources, nApplied);
+ s = _replMain(opCtx, sources, nApplied);
if (s == restartSyncAfterSleep) {
if (nApplied == 0)
s = 2;
@@ -1284,8 +1288,8 @@ static void replMain(OperationContext* txn) {
}
{
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
invariant(syncing.swap(0) == 1);
}
@@ -1316,17 +1320,17 @@ static void replMasterThread() {
// Write a keep-alive like entry to the log. This will make things like
// printReplicationStatus() and printSlaveReplicationStatus() stay up-to-date even
// when things are idle.
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- AuthorizationSession::get(txn.getClient())->grantInternalAuthorization();
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ AuthorizationSession::get(opCtx.getClient())->grantInternalAuthorization();
- Lock::GlobalWrite globalWrite(txn.lockState(), 1);
+ Lock::GlobalWrite globalWrite(opCtx.lockState(), 1);
if (globalWrite.isLocked()) {
toSleep = 10;
try {
- WriteUnitOfWork wuow(&txn);
- getGlobalServiceContext()->getOpObserver()->onOpMessage(&txn, BSONObj());
+ WriteUnitOfWork wuow(&opCtx);
+ getGlobalServiceContext()->getOpObserver()->onOpMessage(&opCtx, BSONObj());
wuow.commit();
} catch (...) {
log() << "caught exception in replMasterThread()" << endl;
@@ -1342,14 +1346,14 @@ static void replSlaveThread() {
sleepsecs(1);
Client::initThread("replslave");
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- AuthorizationSession::get(txn.getClient())->grantInternalAuthorization();
- DisableDocumentValidation validationDisabler(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ AuthorizationSession::get(opCtx.getClient())->grantInternalAuthorization();
+ DisableDocumentValidation validationDisabler(&opCtx);
while (1) {
try {
- replMain(&txn);
+ replMain(&opCtx);
sleepsecs(5);
} catch (AssertionException&) {
ReplInfo r("Assertion in replSlaveThread(): sleeping 5 minutes before retry");
@@ -1366,15 +1370,15 @@ static void replSlaveThread() {
}
}
-void startMasterSlave(OperationContext* txn) {
+void startMasterSlave(OperationContext* opCtx) {
const ReplSettings& replSettings = getGlobalReplicationCoordinator()->getSettings();
if (!replSettings.isSlave() && !replSettings.isMaster())
return;
- AuthorizationSession::get(txn->getClient())->grantInternalAuthorization();
+ AuthorizationSession::get(opCtx->getClient())->grantInternalAuthorization();
{
- ReplSource temp(txn); // Ensures local.me is populated
+ ReplSource temp(opCtx); // Ensures local.me is populated
}
if (replSettings.isSlave()) {
@@ -1385,7 +1389,7 @@ void startMasterSlave(OperationContext* txn) {
if (replSettings.isMaster()) {
LOG(1) << "master=true" << endl;
- createOplog(txn);
+ createOplog(opCtx);
stdx::thread t(replMasterThread);
t.detach();
}
@@ -1400,10 +1404,10 @@ int _dummy_z;
void pretouchN(vector<BSONObj>& v, unsigned a, unsigned b) {
Client::initThreadIfNotAlready("pretouchN");
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr; // XXX
- ScopedTransaction transaction(&txn, MODE_S);
- Lock::GlobalRead lk(txn.lockState());
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr; // XXX
+ ScopedTransaction transaction(&opCtx, MODE_S);
+ Lock::GlobalRead lk(opCtx.lockState());
for (unsigned i = a; i <= b; i++) {
const BSONObj& op = v[i];
@@ -1425,8 +1429,8 @@ void pretouchN(vector<BSONObj>& v, unsigned a, unsigned b) {
BSONObjBuilder b;
b.append(_id);
BSONObj result;
- OldClientContext ctx(&txn, ns, false);
- if (Helpers::findById(&txn, ctx.db(), ns, b.done(), result))
+ OldClientContext ctx(&opCtx, ns, false);
+ if (Helpers::findById(&opCtx, ctx.db(), ns, b.done(), result))
_dummy_z += result.objsize(); // touch
}
} catch (DBException& e) {
@@ -1436,8 +1440,8 @@ void pretouchN(vector<BSONObj>& v, unsigned a, unsigned b) {
}
}
-void pretouchOperation(OperationContext* txn, const BSONObj& op) {
- if (txn->lockState()->isWriteLocked()) {
+void pretouchOperation(OperationContext* opCtx, const BSONObj& op) {
+ if (opCtx->lockState()->isWriteLocked()) {
// no point pretouching if write locked. not sure if this will ever fire, but just in case.
return;
}
@@ -1460,8 +1464,8 @@ void pretouchOperation(OperationContext* txn, const BSONObj& op) {
BSONObjBuilder b;
b.append(_id);
BSONObj result;
- AutoGetCollectionForRead ctx(txn, NamespaceString(ns));
- if (Helpers::findById(txn, ctx.getDb(), ns, b.done(), result)) {
+ AutoGetCollectionForRead ctx(opCtx, NamespaceString(ns));
+ if (Helpers::findById(opCtx, ctx.getDb(), ns, b.done(), result)) {
_dummy_z += result.objsize(); // touch
}
}
diff --git a/src/mongo/db/repl/master_slave.h b/src/mongo/db/repl/master_slave.h
index 3a869a750ba..627223efd16 100644
--- a/src/mongo/db/repl/master_slave.h
+++ b/src/mongo/db/repl/master_slave.h
@@ -50,7 +50,7 @@ class OperationContext;
namespace repl {
// Main entry point for master/slave at startup time.
-void startMasterSlave(OperationContext* txn);
+void startMasterSlave(OperationContext* opCtx);
// externed for use with resync.cpp
extern AtomicInt32 relinquishSyncingSome;
@@ -78,15 +78,15 @@ public:
class ReplSource {
std::shared_ptr<OldThreadPool> tp;
- void resync(OperationContext* txn, const std::string& dbName);
+ void resync(OperationContext* opCtx, const std::string& dbName);
/** @param alreadyLocked caller already put us in write lock if true */
- void _sync_pullOpLog_applyOperation(OperationContext* txn, BSONObj& op, bool alreadyLocked);
+ void _sync_pullOpLog_applyOperation(OperationContext* opCtx, BSONObj& op, bool alreadyLocked);
/* pull some operations from the master's oplog, and apply them.
calls sync_pullOpLog_applyOperation
*/
- int _sync_pullOpLog(OperationContext* txn, int& nApplied);
+ int _sync_pullOpLog(OperationContext* opCtx, int& nApplied);
/* we only clone one database per pass, even if a lot need done. This helps us
avoid overflowing the master's transaction log by doing too much work before going
@@ -106,7 +106,7 @@ class ReplSource {
*/
bool _doHandshake = false;
- void resyncDrop(OperationContext* txn, const std::string& dbName);
+ void resyncDrop(OperationContext* opCtx, const std::string& dbName);
// call without the db mutex
void syncToTailOfRemoteLog();
std::string ns() const {
@@ -120,16 +120,16 @@ class ReplSource {
* master.
* @return true iff an op with the specified ns may be applied.
*/
- bool handleDuplicateDbName(OperationContext* txn,
+ bool handleDuplicateDbName(OperationContext* opCtx,
const BSONObj& op,
const char* ns,
const char* db);
// populates _me so that it can be passed to oplogreader for handshakes
/// TODO(spencer): Remove this function once the LegacyReplicationCoordinator is gone.
- void ensureMe(OperationContext* txn);
+ void ensureMe(OperationContext* opCtx);
- void forceResync(OperationContext* txn, const char* requester);
+ void forceResync(OperationContext* opCtx, const char* requester);
bool _connect(OplogReader* reader, const HostAndPort& host, const OID& myRID);
@@ -138,8 +138,8 @@ class ReplSource {
public:
OplogReader oplogReader;
- void applyCommand(OperationContext* txn, const BSONObj& op);
- void applyOperation(OperationContext* txn, Database* db, const BSONObj& op);
+ void applyCommand(OperationContext* opCtx, const BSONObj& op);
+ void applyOperation(OperationContext* opCtx, Database* db, const BSONObj& op);
std::string hostName; // ip addr or hostname plus optionally, ":<port>"
std::string _sourceName; // a logical source name.
std::string sourceName() const {
@@ -156,18 +156,18 @@ public:
int nClonedThisPass;
typedef std::vector<std::shared_ptr<ReplSource>> SourceVector;
- static void loadAll(OperationContext* txn, SourceVector&);
+ static void loadAll(OperationContext* opCtx, SourceVector&);
- explicit ReplSource(OperationContext* txn, BSONObj);
+ explicit ReplSource(OperationContext* opCtx, BSONObj);
// This is not the constructor you are looking for. Always prefer the version that takes
// a BSONObj. This is public only as a hack so that the ReplicationCoordinator can find
// out the process's RID in master/slave setups.
- ReplSource(OperationContext* txn);
+ ReplSource(OperationContext* opCtx);
/* -1 = error */
- int sync(OperationContext* txn, int& nApplied);
+ int sync(OperationContext* opCtx, int& nApplied);
- void save(OperationContext* txn); // write ourself to local.sources
+ void save(OperationContext* opCtx); // write ourself to local.sources
// make a jsobj from our member fields of the form
// { host: ..., source: ..., syncedTo: ... }
@@ -190,8 +190,8 @@ public:
return wait > 0 ? wait : 0;
}
- static bool throttledForceResyncDead(OperationContext* txn, const char* requester);
- static void forceResyncDead(OperationContext* txn, const char* requester);
+ static bool throttledForceResyncDead(OperationContext* opCtx, const char* requester);
+ static void forceResyncDead(OperationContext* opCtx, const char* requester);
};
/**
diff --git a/src/mongo/db/repl/multiapplier.cpp b/src/mongo/db/repl/multiapplier.cpp
index 036ab4f0d4d..ac78a4f2c8c 100644
--- a/src/mongo/db/repl/multiapplier.cpp
+++ b/src/mongo/db/repl/multiapplier.cpp
@@ -159,8 +159,8 @@ void MultiApplier::_callback(const executor::TaskExecutor::CallbackArgs& cbd) {
StatusWith<OpTime> applyStatus(ErrorCodes::InternalError, "not mutated");
try {
- auto txn = cc().makeOperationContext();
- applyStatus = _multiApply(txn.get(), _operations, _applyOperation);
+ auto opCtx = cc().makeOperationContext();
+ applyStatus = _multiApply(opCtx.get(), _operations, _applyOperation);
} catch (...) {
applyStatus = exceptionToStatus();
}
diff --git a/src/mongo/db/repl/multiapplier_test.cpp b/src/mongo/db/repl/multiapplier_test.cpp
index 9df868384c2..dcbd6979adf 100644
--- a/src/mongo/db/repl/multiapplier_test.cpp
+++ b/src/mongo/db/repl/multiapplier_test.cpp
@@ -156,7 +156,7 @@ TEST_F(MultiApplierTest, MultiApplierInvokesCallbackWithCallbackCanceledStatusUp
const MultiApplier::Operations operations{OplogEntry(BSON("ts" << Timestamp(Seconds(123), 0)))};
bool multiApplyInvoked = false;
- auto multiApply = [&](OperationContext* txn,
+ auto multiApply = [&](OperationContext* opCtx,
MultiApplier::Operations operations,
MultiApplier::ApplyOperationFn) -> StatusWith<OpTime> {
multiApplyInvoked = true;
@@ -223,7 +223,7 @@ TEST_F(MultiApplierTest, MultiApplierCatchesMultiApplyExceptionAndConvertsToCall
bool multiApplyInvoked = false;
Status multiApplyError(ErrorCodes::OperationFailed, "multi apply failed");
- auto multiApply = [&](OperationContext* txn,
+ auto multiApply = [&](OperationContext* opCtx,
MultiApplier::Operations operations,
MultiApplier::ApplyOperationFn) -> StatusWith<OpTime> {
multiApplyInvoked = true;
@@ -255,10 +255,10 @@ TEST_F(
OperationContext* multiApplyTxn = nullptr;
MultiApplier::Operations operationsToApply;
- auto multiApply = [&](OperationContext* txn,
+ auto multiApply = [&](OperationContext* opCtx,
MultiApplier::Operations operations,
MultiApplier::ApplyOperationFn) -> StatusWith<OpTime> {
- multiApplyTxn = txn;
+ multiApplyTxn = opCtx;
operationsToApply = operations;
return operationsToApply.back().getOpTime();
};
diff --git a/src/mongo/db/repl/noop_writer.cpp b/src/mongo/db/repl/noop_writer.cpp
index c12425fd423..f127b236614 100644
--- a/src/mongo/db/repl/noop_writer.cpp
+++ b/src/mongo/db/repl/noop_writer.cpp
@@ -77,8 +77,8 @@ private:
void run(Seconds waitTime, NoopWriteFn noopWrite) {
Client::initThread("NoopWriter");
while (true) {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
{
stdx::unique_lock<stdx::mutex> lk(_mutex);
_cv.wait_for(lk, waitTime.toSystemDuration(), [&] { return _inShutdown; });
@@ -86,7 +86,7 @@ private:
if (_inShutdown)
return;
}
- noopWrite(&txn);
+ noopWrite(&opCtx);
}
}
@@ -126,7 +126,7 @@ Status NoopWriter::startWritingPeriodicNoops(OpTime lastKnownOpTime) {
invariant(!_noopRunner);
_noopRunner = stdx::make_unique<PeriodicNoopRunner>(
- _writeInterval, [this](OperationContext* txn) { _writeNoop(txn); });
+ _writeInterval, [this](OperationContext* opCtx) { _writeNoop(opCtx); });
return Status::OK();
}
@@ -135,20 +135,20 @@ void NoopWriter::stopWritingPeriodicNoops() {
_noopRunner.reset();
}
-void NoopWriter::_writeNoop(OperationContext* txn) {
- ScopedTransaction transaction(txn, MODE_IX);
+void NoopWriter::_writeNoop(OperationContext* opCtx) {
+ ScopedTransaction transaction(opCtx, MODE_IX);
// Use GlobalLock + lockMMAPV1Flush instead of DBLock to allow return when the lock is not
// available. It may happen when the primary steps down and a shared global lock is acquired.
- Lock::GlobalLock lock(txn->lockState(), MODE_IX, 1);
+ Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1);
if (!lock.isLocked()) {
LOG(1) << "Global lock is not available skipping noopWrite";
return;
}
- txn->lockState()->lockMMAPV1Flush();
+ opCtx->lockState()->lockMMAPV1Flush();
- auto replCoord = ReplicationCoordinator::get(txn);
+ auto replCoord = ReplicationCoordinator::get(opCtx);
// Its a proxy for being a primary
- if (!replCoord->canAcceptWritesForDatabase(txn, "admin")) {
+ if (!replCoord->canAcceptWritesForDatabase(opCtx, "admin")) {
LOG(1) << "Not a primary, skipping the noop write";
return;
}
@@ -166,11 +166,12 @@ void NoopWriter::_writeNoop(OperationContext* txn) {
<< "Writing noop to oplog as there has been no writes to this replica set in over "
<< _writeInterval;
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork uow(txn);
- txn->getClient()->getServiceContext()->getOpObserver()->onOpMessage(txn, kMsgObj);
+ WriteUnitOfWork uow(opCtx);
+ opCtx->getClient()->getServiceContext()->getOpObserver()->onOpMessage(opCtx,
+ kMsgObj);
uow.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "writeNoop", rsOplogName);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "writeNoop", rsOplogName);
}
}
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index e394df05efd..9db957bb00c 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -145,13 +145,13 @@ struct OplogSlot {
* function registers the new optime with the storage system and the replication coordinator,
* and provides no facility to revert those registrations on rollback.
*/
-void getNextOpTime(OperationContext* txn,
+void getNextOpTime(OperationContext* opCtx,
Collection* oplog,
ReplicationCoordinator* replCoord,
ReplicationCoordinator::Mode replicationMode,
unsigned count,
OplogSlot* slotsOut) {
- synchronizeOnCappedInFlightResource(txn->lockState(), oplog->ns());
+ synchronizeOnCappedInFlightResource(opCtx->lockState(), oplog->ns());
long long term = OpTime::kUninitializedTerm;
// Fetch term out of the newOpMutex.
@@ -163,10 +163,10 @@ void getNextOpTime(OperationContext* txn,
stdx::lock_guard<stdx::mutex> lk(newOpMutex);
- auto ts = LogicalClock::get(txn)->reserveTicks(count).asTimestamp();
+ auto ts = LogicalClock::get(opCtx)->reserveTicks(count).asTimestamp();
newTimestampNotifier.notify_all();
- fassert(28560, oplog->getRecordStore()->oplogDiskLocRegister(txn, ts));
+ fassert(28560, oplog->getRecordStore()->oplogDiskLocRegister(opCtx, ts));
// Set hash if we're in replset mode, otherwise it remains 0 in master/slave.
const bool needHash = (replicationMode == ReplicationCoordinator::modeReplSet);
@@ -229,11 +229,12 @@ void setOplogCollectionName() {
namespace {
-Collection* getLocalOplogCollection(OperationContext* txn, const std::string& oplogCollectionName) {
+Collection* getLocalOplogCollection(OperationContext* opCtx,
+ const std::string& oplogCollectionName) {
if (_localOplogCollection)
return _localOplogCollection;
- AutoGetCollection autoColl(txn, NamespaceString(oplogCollectionName), MODE_IX);
+ AutoGetCollection autoColl(opCtx, NamespaceString(oplogCollectionName), MODE_IX);
_localOplogCollection = autoColl.getCollection();
massert(13347,
"the oplog collection " + oplogCollectionName +
@@ -243,7 +244,7 @@ Collection* getLocalOplogCollection(OperationContext* txn, const std::string& op
return _localOplogCollection;
}
-bool oplogDisabled(OperationContext* txn,
+bool oplogDisabled(OperationContext* opCtx,
ReplicationCoordinator::Mode replicationMode,
const NamespaceString& nss) {
if (replicationMode == ReplicationCoordinator::modeNone)
@@ -255,15 +256,15 @@ bool oplogDisabled(OperationContext* txn,
if (nss.isSystemDotProfile())
return true;
- if (!txn->writesAreReplicated())
+ if (!opCtx->writesAreReplicated())
return true;
- fassert(28626, txn->recoveryUnit());
+ fassert(28626, opCtx->recoveryUnit());
return false;
}
-OplogDocWriter _logOpWriter(OperationContext* txn,
+OplogDocWriter _logOpWriter(OperationContext* opCtx,
const char* opstr,
const NamespaceString& nss,
const BSONObj& obj,
@@ -290,11 +291,11 @@ OplogDocWriter _logOpWriter(OperationContext* txn,
} // end anon namespace
// Truncates the oplog after and including the "truncateTimestamp" entry.
-void truncateOplogTo(OperationContext* txn, Timestamp truncateTimestamp) {
+void truncateOplogTo(OperationContext* opCtx, Timestamp truncateTimestamp) {
const NamespaceString oplogNss(rsOplogName);
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, oplogNss.db(), MODE_IX);
- Lock::CollectionLock oplogCollectionLoc(txn->lockState(), oplogNss.ns(), MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ AutoGetDb autoDb(opCtx, oplogNss.db(), MODE_IX);
+ Lock::CollectionLock oplogCollectionLoc(opCtx->lockState(), oplogNss.ns(), MODE_X);
Collection* oplogCollection = autoDb.getDb()->getCollection(oplogNss);
if (!oplogCollection) {
fassertFailedWithStatusNoTrace(
@@ -305,7 +306,7 @@ void truncateOplogTo(OperationContext* txn, Timestamp truncateTimestamp) {
// Scan through oplog in reverse, from latest entry to first, to find the truncateTimestamp.
RecordId oldestIDToDelete; // Non-null if there is something to delete.
auto oplogRs = oplogCollection->getRecordStore();
- auto oplogReverseCursor = oplogRs->getCursor(txn, /*forward=*/false);
+ auto oplogReverseCursor = oplogRs->getCursor(opCtx, /*forward=*/false);
size_t count = 0;
while (auto next = oplogReverseCursor->next()) {
const BSONObj entry = next->data.releaseToBson();
@@ -325,7 +326,7 @@ void truncateOplogTo(OperationContext* txn, Timestamp truncateTimestamp) {
// oplog is < truncateTimestamp.
if (count != 1) {
invariant(!oldestIDToDelete.isNull());
- oplogCollection->cappedTruncateAfter(txn, oldestIDToDelete, /*inclusive=*/true);
+ oplogCollection->cappedTruncateAfter(opCtx, oldestIDToDelete, /*inclusive=*/true);
}
return;
}
@@ -356,7 +357,7 @@ void truncateOplogTo(OperationContext* txn, Timestamp truncateTimestamp) {
if not null, specifies a boolean to pass along to the other side as b: param.
used for "justOne" or "upsert" flags on 'd', 'u'
*/
-void _logOpsInner(OperationContext* txn,
+void _logOpsInner(OperationContext* opCtx,
const NamespaceString& nss,
const DocWriter* const* writers,
size_t nWriters,
@@ -366,79 +367,80 @@ void _logOpsInner(OperationContext* txn,
ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
if (nss.size() && replicationMode == ReplicationCoordinator::modeReplSet &&
- !replCoord->canAcceptWritesFor(txn, nss)) {
+ !replCoord->canAcceptWritesFor(opCtx, nss)) {
severe() << "logOp() but can't accept write to collection " << nss.ns();
fassertFailed(17405);
}
// we jump through a bunch of hoops here to avoid copying the obj buffer twice --
// instead we do a single copy to the destination in the record store.
- checkOplogInsert(oplogCollection->insertDocumentsForOplog(txn, writers, nWriters));
+ checkOplogInsert(oplogCollection->insertDocumentsForOplog(opCtx, writers, nWriters));
// Set replCoord last optime only after we're sure the WUOW didn't abort and roll back.
- txn->recoveryUnit()->onCommit([txn, replCoord, finalOpTime] {
+ opCtx->recoveryUnit()->onCommit([opCtx, replCoord, finalOpTime] {
replCoord->setMyLastAppliedOpTimeForward(finalOpTime);
- ReplClientInfo::forClient(txn->getClient()).setLastOp(finalOpTime);
+ ReplClientInfo::forClient(opCtx->getClient()).setLastOp(finalOpTime);
});
}
-void logOp(OperationContext* txn,
+void logOp(OperationContext* opCtx,
const char* opstr,
const char* ns,
const BSONObj& obj,
const BSONObj* o2,
bool fromMigrate) {
- ReplicationCoordinator::Mode replMode = ReplicationCoordinator::get(txn)->getReplicationMode();
+ ReplicationCoordinator::Mode replMode =
+ ReplicationCoordinator::get(opCtx)->getReplicationMode();
NamespaceString nss(ns);
- if (oplogDisabled(txn, replMode, nss))
+ if (oplogDisabled(opCtx, replMode, nss))
return;
ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
- Collection* oplog = getLocalOplogCollection(txn, _oplogCollectionName);
- Lock::DBLock lk(txn->lockState(), "local", MODE_IX);
- Lock::CollectionLock lock(txn->lockState(), _oplogCollectionName, MODE_IX);
+ Collection* oplog = getLocalOplogCollection(opCtx, _oplogCollectionName);
+ Lock::DBLock lk(opCtx->lockState(), "local", MODE_IX);
+ Lock::CollectionLock lock(opCtx->lockState(), _oplogCollectionName, MODE_IX);
OplogSlot slot;
- getNextOpTime(txn, oplog, replCoord, replMode, 1, &slot);
- auto writer = _logOpWriter(txn, opstr, nss, obj, o2, fromMigrate, slot.opTime, slot.hash);
+ getNextOpTime(opCtx, oplog, replCoord, replMode, 1, &slot);
+ auto writer = _logOpWriter(opCtx, opstr, nss, obj, o2, fromMigrate, slot.opTime, slot.hash);
const DocWriter* basePtr = &writer;
- _logOpsInner(txn, nss, &basePtr, 1, oplog, replMode, slot.opTime);
+ _logOpsInner(opCtx, nss, &basePtr, 1, oplog, replMode, slot.opTime);
}
-void logOps(OperationContext* txn,
+void logOps(OperationContext* opCtx,
const char* opstr,
const NamespaceString& nss,
std::vector<BSONObj>::const_iterator begin,
std::vector<BSONObj>::const_iterator end,
bool fromMigrate) {
- ReplicationCoordinator* replCoord = ReplicationCoordinator::get(txn);
+ ReplicationCoordinator* replCoord = ReplicationCoordinator::get(opCtx);
ReplicationCoordinator::Mode replMode = replCoord->getReplicationMode();
invariant(begin != end);
- if (oplogDisabled(txn, replMode, nss))
+ if (oplogDisabled(opCtx, replMode, nss))
return;
const size_t count = end - begin;
std::vector<OplogDocWriter> writers;
writers.reserve(count);
- Collection* oplog = getLocalOplogCollection(txn, _oplogCollectionName);
- Lock::DBLock lk(txn->lockState(), "local", MODE_IX);
- Lock::CollectionLock lock(txn->lockState(), _oplogCollectionName, MODE_IX);
+ Collection* oplog = getLocalOplogCollection(opCtx, _oplogCollectionName);
+ Lock::DBLock lk(opCtx->lockState(), "local", MODE_IX);
+ Lock::CollectionLock lock(opCtx->lockState(), _oplogCollectionName, MODE_IX);
std::unique_ptr<OplogSlot[]> slots(new OplogSlot[count]);
- getNextOpTime(txn, oplog, replCoord, replMode, count, slots.get());
+ getNextOpTime(opCtx, oplog, replCoord, replMode, count, slots.get());
for (size_t i = 0; i < count; i++) {
writers.emplace_back(_logOpWriter(
- txn, opstr, nss, begin[i], NULL, fromMigrate, slots[i].opTime, slots[i].hash));
+ opCtx, opstr, nss, begin[i], NULL, fromMigrate, slots[i].opTime, slots[i].hash));
}
std::unique_ptr<DocWriter const* []> basePtrs(new DocWriter const*[count]);
for (size_t i = 0; i < count; i++) {
basePtrs[i] = &writers[i];
}
- _logOpsInner(txn, nss, basePtrs.get(), count, oplog, replMode, slots[count - 1].opTime);
+ _logOpsInner(opCtx, nss, basePtrs.get(), count, oplog, replMode, slots[count - 1].opTime);
}
namespace {
-long long getNewOplogSizeBytes(OperationContext* txn, const ReplSettings& replSettings) {
+long long getNewOplogSizeBytes(OperationContext* opCtx, const ReplSettings& replSettings) {
if (replSettings.getOplogSizeBytes() != 0) {
return replSettings.getOplogSizeBytes();
}
@@ -459,7 +461,7 @@ long long getNewOplogSizeBytes(OperationContext* txn, const ReplSettings& replSe
#else
long long lowerBound = 0;
double bytes = 0;
- if (txn->getClient()->getServiceContext()->getGlobalStorageEngine()->isEphemeral()) {
+ if (opCtx->getClient()->getServiceContext()->getGlobalStorageEngine()->isEphemeral()) {
// in memory: 50MB minimum size
lowerBound = 50LL * 1024 * 1024;
bytes = pi.getMemSizeMB() * 1024 * 1024;
@@ -482,19 +484,19 @@ long long getNewOplogSizeBytes(OperationContext* txn, const ReplSettings& replSe
}
} // namespace
-void createOplog(OperationContext* txn, const std::string& oplogCollectionName, bool isReplSet) {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
+void createOplog(OperationContext* opCtx, const std::string& oplogCollectionName, bool isReplSet) {
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
- const ReplSettings& replSettings = ReplicationCoordinator::get(txn)->getSettings();
+ const ReplSettings& replSettings = ReplicationCoordinator::get(opCtx)->getSettings();
- OldClientContext ctx(txn, oplogCollectionName);
+ OldClientContext ctx(opCtx, oplogCollectionName);
Collection* collection = ctx.db()->getCollection(oplogCollectionName);
if (collection) {
if (replSettings.getOplogSizeBytes() != 0) {
const CollectionOptions oplogOpts =
- collection->getCatalogEntry()->getCollectionOptions(txn);
+ collection->getCatalogEntry()->getCollectionOptions(opCtx);
int o = (int)(oplogOpts.cappedSize / (1024 * 1024));
int n = (int)(replSettings.getOplogSizeBytes() / (1024 * 1024));
@@ -508,12 +510,12 @@ void createOplog(OperationContext* txn, const std::string& oplogCollectionName,
}
if (!isReplSet)
- initTimestampFromOplog(txn, oplogCollectionName);
+ initTimestampFromOplog(opCtx, oplogCollectionName);
return;
}
/* create an oplog collection, if it doesn't yet exist. */
- const auto sz = getNewOplogSizeBytes(txn, replSettings);
+ const auto sz = getNewOplogSizeBytes(opCtx, replSettings);
log() << "******" << endl;
log() << "creating replication oplog of size: " << (int)(sz / (1024 * 1024)) << "MB..." << endl;
@@ -524,24 +526,24 @@ void createOplog(OperationContext* txn, const std::string& oplogCollectionName,
options.autoIndexId = CollectionOptions::NO;
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork uow(txn);
- invariant(ctx.db()->createCollection(txn, oplogCollectionName, options));
+ WriteUnitOfWork uow(opCtx);
+ invariant(ctx.db()->createCollection(opCtx, oplogCollectionName, options));
if (!isReplSet)
- getGlobalServiceContext()->getOpObserver()->onOpMessage(txn, BSONObj());
+ getGlobalServiceContext()->getOpObserver()->onOpMessage(opCtx, BSONObj());
uow.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollection", oplogCollectionName);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "createCollection", oplogCollectionName);
/* sync here so we don't get any surprising lag later when we try to sync */
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- storageEngine->flushAllFiles(txn, true);
+ storageEngine->flushAllFiles(opCtx, true);
log() << "******" << endl;
}
-void createOplog(OperationContext* txn) {
- const auto isReplSet = ReplicationCoordinator::get(txn)->getReplicationMode() ==
+void createOplog(OperationContext* opCtx) {
+ const auto isReplSet = ReplicationCoordinator::get(opCtx)->getReplicationMode() ==
ReplicationCoordinator::modeReplSet;
- createOplog(txn, _oplogCollectionName, isReplSet);
+ createOplog(opCtx, _oplogCollectionName, isReplSet);
}
// -------------------------------------
@@ -575,13 +577,13 @@ struct ApplyOpMetadata {
std::map<std::string, ApplyOpMetadata> opsMap = {
{"create",
- {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ {[](OperationContext* opCtx, const char* ns, BSONObj& cmd) -> Status {
const NamespaceString nss(parseNs(ns, cmd));
if (auto idIndexElem = cmd["idIndex"]) {
// Remove "idIndex" field from command.
auto cmdWithoutIdIndex = cmd.removeField("idIndex");
return createCollection(
- txn, nss.db().toString(), cmdWithoutIdIndex, idIndexElem.Obj());
+ opCtx, nss.db().toString(), cmdWithoutIdIndex, idIndexElem.Obj());
}
// No _id index spec was provided, so we should build a v:1 _id index.
@@ -591,55 +593,55 @@ std::map<std::string, ApplyOpMetadata> opsMap = {
idIndexSpecBuilder.append(IndexDescriptor::kIndexNameFieldName, "_id_");
idIndexSpecBuilder.append(IndexDescriptor::kNamespaceFieldName, nss.ns());
idIndexSpecBuilder.append(IndexDescriptor::kKeyPatternFieldName, BSON("_id" << 1));
- return createCollection(txn, nss.db().toString(), cmd, idIndexSpecBuilder.done());
+ return createCollection(opCtx, nss.db().toString(), cmd, idIndexSpecBuilder.done());
},
{ErrorCodes::NamespaceExists}}},
{"collMod",
- {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ {[](OperationContext* opCtx, const char* ns, BSONObj& cmd) -> Status {
BSONObjBuilder resultWeDontCareAbout;
- return collMod(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
+ return collMod(opCtx, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
},
{ErrorCodes::IndexNotFound, ErrorCodes::NamespaceNotFound}}},
{"dropDatabase",
- {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- return dropDatabase(txn, NamespaceString(ns).db().toString());
+ {[](OperationContext* opCtx, const char* ns, BSONObj& cmd) -> Status {
+ return dropDatabase(opCtx, NamespaceString(ns).db().toString());
},
{ErrorCodes::NamespaceNotFound}}},
{"drop",
- {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ {[](OperationContext* opCtx, const char* ns, BSONObj& cmd) -> Status {
BSONObjBuilder resultWeDontCareAbout;
- return dropCollection(txn, parseNs(ns, cmd), resultWeDontCareAbout);
+ return dropCollection(opCtx, parseNs(ns, cmd), resultWeDontCareAbout);
},
// IllegalOperation is necessary because in 3.0 we replicate drops of system.profile
// TODO(dannenberg) remove IllegalOperation once we no longer need 3.0 compatibility
{ErrorCodes::NamespaceNotFound, ErrorCodes::IllegalOperation}}},
// deleteIndex(es) is deprecated but still works as of April 10, 2015
{"deleteIndex",
- {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ {[](OperationContext* opCtx, const char* ns, BSONObj& cmd) -> Status {
BSONObjBuilder resultWeDontCareAbout;
- return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
+ return dropIndexes(opCtx, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
},
{ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}},
{"deleteIndexes",
- {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ {[](OperationContext* opCtx, const char* ns, BSONObj& cmd) -> Status {
BSONObjBuilder resultWeDontCareAbout;
- return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
+ return dropIndexes(opCtx, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
},
{ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}},
{"dropIndex",
- {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ {[](OperationContext* opCtx, const char* ns, BSONObj& cmd) -> Status {
BSONObjBuilder resultWeDontCareAbout;
- return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
+ return dropIndexes(opCtx, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
},
{ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}},
{"dropIndexes",
- {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ {[](OperationContext* opCtx, const char* ns, BSONObj& cmd) -> Status {
BSONObjBuilder resultWeDontCareAbout;
- return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
+ return dropIndexes(opCtx, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
},
{ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}},
{"renameCollection",
- {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ {[](OperationContext* opCtx, const char* ns, BSONObj& cmd) -> Status {
const auto sourceNsElt = cmd.firstElement();
const auto targetNsElt = cmd["to"];
uassert(ErrorCodes::TypeMismatch,
@@ -648,7 +650,7 @@ std::map<std::string, ApplyOpMetadata> opsMap = {
uassert(ErrorCodes::TypeMismatch,
"'to' must be of type String",
targetNsElt.type() == BSONType::String);
- return renameCollection(txn,
+ return renameCollection(opCtx,
NamespaceString(sourceNsElt.valueStringData()),
NamespaceString(targetNsElt.valueStringData()),
cmd["dropTarget"].trueValue(),
@@ -656,16 +658,16 @@ std::map<std::string, ApplyOpMetadata> opsMap = {
},
{ErrorCodes::NamespaceNotFound, ErrorCodes::NamespaceExists}}},
{"applyOps",
- {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ {[](OperationContext* opCtx, const char* ns, BSONObj& cmd) -> Status {
BSONObjBuilder resultWeDontCareAbout;
- return applyOps(txn, nsToDatabase(ns), cmd, &resultWeDontCareAbout);
+ return applyOps(opCtx, nsToDatabase(ns), cmd, &resultWeDontCareAbout);
},
{ErrorCodes::UnknownError}}},
- {"convertToCapped", {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- return convertToCapped(txn, parseNs(ns, cmd), cmd["size"].number());
+ {"convertToCapped", {[](OperationContext* opCtx, const char* ns, BSONObj& cmd) -> Status {
+ return convertToCapped(opCtx, parseNs(ns, cmd), cmd["size"].number());
}}},
- {"emptycapped", {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- return emptyCapped(txn, parseNs(ns, cmd));
+ {"emptycapped", {[](OperationContext* opCtx, const char* ns, BSONObj& cmd) -> Status {
+ return emptyCapped(opCtx, parseNs(ns, cmd));
}}},
};
@@ -673,14 +675,14 @@ std::map<std::string, ApplyOpMetadata> opsMap = {
// @return failure status if an update should have happened and the document DNE.
// See replset initial sync code.
-Status applyOperation_inlock(OperationContext* txn,
+Status applyOperation_inlock(OperationContext* opCtx,
Database* db,
const BSONObj& op,
bool inSteadyStateReplication,
IncrementOpsAppliedStatsFn incrementOpsAppliedStats) {
LOG(3) << "applying op: " << redact(op);
- OpCounters* opCounters = txn->writesAreReplicated() ? &globalOpCounters : &replOpCounters;
+ OpCounters* opCounters = opCtx->writesAreReplicated() ? &globalOpCounters : &replOpCounters;
const char* names[] = {"o", "ns", "op", "b", "o2"};
BSONElement fields[5];
@@ -710,19 +712,19 @@ Status applyOperation_inlock(OperationContext* txn,
if (supportsDocLocking()) {
// WiredTiger, and others requires MODE_IX since the applier threads driving
// this allow writes to the same collection on any thread.
- dassert(txn->lockState()->isCollectionLockedForMode(ns, MODE_IX));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns, MODE_IX));
} else {
// mmapV1 ensures that all operations to the same collection are executed from
// the same worker thread, so it takes an exclusive lock (MODE_X)
- dassert(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns, MODE_X));
}
}
Collection* collection = db->getCollection(ns);
IndexCatalog* indexCatalog = collection == nullptr ? nullptr : collection->getIndexCatalog();
- const bool haveWrappingWriteUnitOfWork = txn->lockState()->inAWriteUnitOfWork();
+ const bool haveWrappingWriteUnitOfWork = opCtx->lockState()->inAWriteUnitOfWork();
uassert(ErrorCodes::CommandNotSupportedOnView,
str::stream() << "applyOps not supported on view: " << ns,
- collection || !db->getViewCatalog()->lookup(txn, ns));
+ collection || !db->getViewCatalog()->lookup(opCtx, ns));
// operation type -- see logOp() comments for types
const char* opType = fieldOp.valuestrsafe();
@@ -768,15 +770,15 @@ Status applyOperation_inlock(OperationContext* txn,
}
bool relaxIndexConstraints =
- ReplicationCoordinator::get(txn)->shouldRelaxIndexConstraints(txn, indexNss);
+ ReplicationCoordinator::get(opCtx)->shouldRelaxIndexConstraints(opCtx, indexNss);
if (indexSpec["background"].trueValue()) {
- Lock::TempRelease release(txn->lockState());
- if (txn->lockState()->isLocked()) {
+ Lock::TempRelease release(opCtx->lockState());
+ if (opCtx->lockState()->isLocked()) {
// If TempRelease fails, background index build will deadlock.
LOG(3) << "apply op: building background index " << indexSpec
<< " in the foreground because temp release failed";
IndexBuilder builder(indexSpec, relaxIndexConstraints);
- Status status = builder.buildInForeground(txn, db);
+ Status status = builder.buildInForeground(opCtx, db);
uassertStatusOK(status);
} else {
IndexBuilder* builder = new IndexBuilder(indexSpec, relaxIndexConstraints);
@@ -785,10 +787,10 @@ Status applyOperation_inlock(OperationContext* txn,
// Wait for thread to start and register itself
IndexBuilder::waitForBgIndexStarting();
}
- txn->recoveryUnit()->abandonSnapshot();
+ opCtx->recoveryUnit()->abandonSnapshot();
} else {
IndexBuilder builder(indexSpec, relaxIndexConstraints);
- Status status = builder.buildInForeground(txn, db);
+ Status status = builder.buildInForeground(opCtx, db);
uassertStatusOK(status);
}
// Since this is an index operation we can return without falling through.
@@ -812,10 +814,10 @@ Status applyOperation_inlock(OperationContext* txn,
str::stream() << "Failed to apply insert due to empty array element: "
<< op.toString(),
!insertObjs.empty());
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
OpDebug* const nullOpDebug = nullptr;
Status status = collection->insertDocuments(
- txn, insertObjs.begin(), insertObjs.end(), nullOpDebug, true);
+ opCtx, insertObjs.begin(), insertObjs.end(), nullOpDebug, true);
if (!status.isOK()) {
return status;
}
@@ -849,10 +851,10 @@ Status applyOperation_inlock(OperationContext* txn,
bool needToDoUpsert = haveWrappingWriteUnitOfWork;
if (!needToDoUpsert) {
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
try {
OpDebug* const nullOpDebug = nullptr;
- status = collection->insertDocument(txn, o, nullOpDebug, true);
+ status = collection->insertDocument(opCtx, o, nullOpDebug, true);
} catch (DBException dbe) {
status = dbe.toStatus();
}
@@ -881,7 +883,7 @@ Status applyOperation_inlock(OperationContext* txn,
UpdateLifecycleImpl updateLifecycle(requestNs);
request.setLifecycle(&updateLifecycle);
- UpdateResult res = update(txn, db, request);
+ UpdateResult res = update(opCtx, db, request);
if (res.numMatched == 0 && res.upserted.isEmpty()) {
error() << "No document was updated even though we got a DuplicateKey "
"error when inserting";
@@ -912,7 +914,7 @@ Status applyOperation_inlock(OperationContext* txn,
UpdateLifecycleImpl updateLifecycle(requestNs);
request.setLifecycle(&updateLifecycle);
- UpdateResult ur = update(txn, db, request);
+ UpdateResult ur = update(opCtx, db, request);
if (ur.numMatched == 0 && ur.upserted.isEmpty()) {
if (ur.modifiers) {
@@ -929,11 +931,11 @@ Status applyOperation_inlock(OperationContext* txn,
// { _id:..., { x : {$size:...} }
// thus this is not ideal.
if (collection == NULL ||
- (indexCatalog->haveIdIndex(txn) &&
- Helpers::findById(txn, collection, updateCriteria).isNull()) ||
+ (indexCatalog->haveIdIndex(opCtx) &&
+ Helpers::findById(opCtx, collection, updateCriteria).isNull()) ||
// capped collections won't have an _id index
- (!indexCatalog->haveIdIndex(txn) &&
- Helpers::findOne(txn, collection, updateCriteria, false).isNull())) {
+ (!indexCatalog->haveIdIndex(opCtx) &&
+ Helpers::findOne(opCtx, collection, updateCriteria, false).isNull())) {
string msg = str::stream() << "couldn't find doc: " << redact(op);
error() << msg;
return Status(ErrorCodes::OperationFailed, msg);
@@ -963,7 +965,7 @@ Status applyOperation_inlock(OperationContext* txn,
o.hasField("_id"));
if (opType[1] == 0) {
- deleteObjects(txn, collection, ns, o, PlanExecutor::YIELD_MANUAL, /*justOne*/ valueB);
+ deleteObjects(opCtx, collection, ns, o, PlanExecutor::YIELD_MANUAL, /*justOne*/ valueB);
} else
verify(opType[1] == 'b'); // "db" advertisement
if (incrementOpsAppliedStats) {
@@ -984,15 +986,15 @@ Status applyOperation_inlock(OperationContext* txn,
// have a wrapping WUOW, the extra nexting is harmless. The logOp really should have been
// done in the WUOW that did the write, but this won't happen because applyOps turns off
// observers.
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
getGlobalAuthorizationManager()->logOp(
- txn, opType, ns.toString().c_str(), o, fieldO2.isABSONObj() ? &o2 : NULL);
+ opCtx, opType, ns.toString().c_str(), o, fieldO2.isABSONObj() ? &o2 : NULL);
wuow.commit();
return Status::OK();
}
-Status applyCommand_inlock(OperationContext* txn,
+Status applyCommand_inlock(OperationContext* opCtx,
const BSONObj& op,
bool inSteadyStateReplication) {
const char* names[] = {"o", "ns", "op"};
@@ -1023,8 +1025,8 @@ Status applyCommand_inlock(OperationContext* txn,
return {ErrorCodes::InvalidNamespace, "invalid ns: " + std::string(nss.ns())};
}
{
- Database* db = dbHolder().get(txn, nss.ns());
- if (db && !db->getCollection(nss.ns()) && db->getViewCatalog()->lookup(txn, nss.ns())) {
+ Database* db = dbHolder().get(opCtx, nss.ns());
+ if (db && !db->getCollection(nss.ns()) && db->getViewCatalog()->lookup(opCtx, nss.ns())) {
return {ErrorCodes::CommandNotSupportedOnView,
str::stream() << "applyOps not supported on view:" << nss.ns()};
}
@@ -1040,7 +1042,7 @@ Status applyCommand_inlock(OperationContext* txn,
// Applying commands in repl is done under Global W-lock, so it is safe to not
// perform the current DB checks after reacquiring the lock.
- invariant(txn->lockState()->isW());
+ invariant(opCtx->lockState()->isW());
bool done = false;
@@ -1054,7 +1056,7 @@ Status applyCommand_inlock(OperationContext* txn,
ApplyOpMetadata curOpToApply = op->second;
Status status = Status::OK();
try {
- status = curOpToApply.applyFunc(txn, nss.ns().c_str(), o);
+ status = curOpToApply.applyFunc(opCtx, nss.ns().c_str(), o);
} catch (...) {
status = exceptionToStatus();
}
@@ -1065,21 +1067,21 @@ Status applyCommand_inlock(OperationContext* txn,
throw WriteConflictException();
}
case ErrorCodes::BackgroundOperationInProgressForDatabase: {
- Lock::TempRelease release(txn->lockState());
+ Lock::TempRelease release(opCtx->lockState());
BackgroundOperation::awaitNoBgOpInProgForDb(nss.db());
- txn->recoveryUnit()->abandonSnapshot();
- txn->checkForInterrupt();
+ opCtx->recoveryUnit()->abandonSnapshot();
+ opCtx->checkForInterrupt();
break;
}
case ErrorCodes::BackgroundOperationInProgressForNamespace: {
- Lock::TempRelease release(txn->lockState());
+ Lock::TempRelease release(opCtx->lockState());
Command* cmd = Command::findCommand(o.firstElement().fieldName());
invariant(cmd);
BackgroundOperation::awaitNoBgOpInProgForNs(cmd->parseNs(nss.db().toString(), o));
- txn->recoveryUnit()->abandonSnapshot();
- txn->checkForInterrupt();
+ opCtx->recoveryUnit()->abandonSnapshot();
+ opCtx->checkForInterrupt();
break;
}
default:
@@ -1101,8 +1103,8 @@ Status applyCommand_inlock(OperationContext* txn,
// AuthorizationManager's logOp method registers a RecoveryUnit::Change
// and to do so we need to have begun a UnitOfWork
- WriteUnitOfWork wuow(txn);
- getGlobalAuthorizationManager()->logOp(txn, opType, nss.ns().c_str(), o, nullptr);
+ WriteUnitOfWork wuow(opCtx);
+ getGlobalAuthorizationManager()->logOp(opCtx, opType, nss.ns().c_str(), o, nullptr);
wuow.commit();
return Status::OK();
@@ -1114,19 +1116,19 @@ void setNewTimestamp(ServiceContext* service, const Timestamp& newTime) {
newTimestampNotifier.notify_all();
}
-void initTimestampFromOplog(OperationContext* txn, const std::string& oplogNS) {
- DBDirectClient c(txn);
+void initTimestampFromOplog(OperationContext* opCtx, const std::string& oplogNS) {
+ DBDirectClient c(opCtx);
BSONObj lastOp = c.findOne(oplogNS, Query().sort(reverseNaturalObj), NULL, QueryOption_SlaveOk);
if (!lastOp.isEmpty()) {
LOG(1) << "replSet setting last Timestamp";
const OpTime opTime = fassertStatusOK(28696, OpTime::parseFromOplogEntry(lastOp));
- setNewTimestamp(txn->getServiceContext(), opTime.getTimestamp());
+ setNewTimestamp(opCtx->getServiceContext(), opTime.getTimestamp());
}
}
-void oplogCheckCloseDatabase(OperationContext* txn, Database* db) {
- invariant(txn->lockState()->isW());
+void oplogCheckCloseDatabase(OperationContext* opCtx, Database* db) {
+ invariant(opCtx->lockState()->isW());
_localOplogCollection = nullptr;
}
@@ -1213,8 +1215,8 @@ void SnapshotThread::run() {
}
try {
- auto txn = client.makeOperationContext();
- Lock::GlobalLock globalLock(txn->lockState(), MODE_IS, UINT_MAX);
+ auto opCtx = client.makeOperationContext();
+ Lock::GlobalLock globalLock(opCtx->lockState(), MODE_IS, UINT_MAX);
if (!replCoord->getMemberState().readable()) {
// If our MemberState isn't readable, we may not be in a consistent state so don't
@@ -1231,9 +1233,9 @@ void SnapshotThread::run() {
// Make sure there are no in-flight capped inserts while we create our snapshot.
// This lock cannot be acquired until all writes holding the resource commit/abort.
Lock::ResourceLock cappedInsertLockForOtherDb(
- txn->lockState(), resourceCappedInFlightForOtherDb, MODE_X);
+ opCtx->lockState(), resourceCappedInFlightForOtherDb, MODE_X);
Lock::ResourceLock cappedInsertLockForLocalDb(
- txn->lockState(), resourceCappedInFlightForLocalDb, MODE_X);
+ opCtx->lockState(), resourceCappedInFlightForLocalDb, MODE_X);
// Reserve the name immediately before we take our snapshot. This ensures that all
// names that compare lower must be from points in time visible to this named
@@ -1241,15 +1243,15 @@ void SnapshotThread::run() {
name = replCoord->reserveSnapshotName(nullptr);
// This establishes the view that we will name.
- _manager->prepareForCreateSnapshot(txn.get());
+ _manager->prepareForCreateSnapshot(opCtx.get());
}
auto opTimeOfSnapshot = OpTime();
{
- AutoGetCollectionForRead oplog(txn.get(), NamespaceString(rsOplogName));
+ AutoGetCollectionForRead oplog(opCtx.get(), NamespaceString(rsOplogName));
invariant(oplog.getCollection());
// Read the latest op from the oplog.
- auto cursor = oplog.getCollection()->getCursor(txn.get(), /*forward*/ false);
+ auto cursor = oplog.getCollection()->getCursor(opCtx.get(), /*forward*/ false);
auto record = cursor->next();
if (!record)
continue; // oplog is completely empty.
@@ -1259,7 +1261,7 @@ void SnapshotThread::run() {
invariant(!opTimeOfSnapshot.isNull());
}
- replCoord->createSnapshot(txn.get(), opTimeOfSnapshot, name);
+ replCoord->createSnapshot(opCtx.get(), opTimeOfSnapshot, name);
} catch (const WriteConflictException& wce) {
log() << "skipping storage snapshot pass due to write conflict";
continue;
diff --git a/src/mongo/db/repl/oplog.h b/src/mongo/db/repl/oplog.h
index dc61805e789..fcff70fb607 100644
--- a/src/mongo/db/repl/oplog.h
+++ b/src/mongo/db/repl/oplog.h
@@ -50,20 +50,20 @@ class ReplSettings;
/**
* Truncates the oplog after, and including, the "truncateTimestamp" entry.
*/
-void truncateOplogTo(OperationContext* txn, Timestamp truncateTimestamp);
+void truncateOplogTo(OperationContext* opCtx, Timestamp truncateTimestamp);
/**
* Create a new capped collection for the oplog if it doesn't yet exist.
* If the collection already exists (and isReplSet is false),
* set the 'last' Timestamp from the last entry of the oplog collection (side effect!)
*/
-void createOplog(OperationContext* txn, const std::string& oplogCollectionName, bool isReplSet);
+void createOplog(OperationContext* opCtx, const std::string& oplogCollectionName, bool isReplSet);
/*
* Shortcut for above function using oplogCollectionName = _oplogCollectionName,
* and replEnabled = replCoord::isReplSet();
*/
-void createOplog(OperationContext* txn);
+void createOplog(OperationContext* opCtx);
extern std::string rsOplogName;
extern std::string masterSlaveOplogName;
@@ -81,7 +81,7 @@ extern int OPLOG_VERSION;
* "db" declares presence of a database (ns is set to the db name + '.')
*/
-void logOps(OperationContext* txn,
+void logOps(OperationContext* opCtx,
const char* opstr,
const NamespaceString& nss,
std::vector<BSONObj>::const_iterator begin,
@@ -91,7 +91,7 @@ void logOps(OperationContext* txn,
/* For 'u' records, 'obj' captures the mutation made to the object but not
* the object itself. 'o2' captures the criteria for the object that will be modified.
*/
-void logOp(OperationContext* txn,
+void logOp(OperationContext* opCtx,
const char* opstr,
const char* ns,
const BSONObj& obj,
@@ -100,7 +100,7 @@ void logOp(OperationContext* txn,
// Flush out the cached pointers to the local database and oplog.
// Used by the closeDatabase command to ensure we don't cache closed things.
-void oplogCheckCloseDatabase(OperationContext* txn, Database* db);
+void oplogCheckCloseDatabase(OperationContext* opCtx, Database* db);
using IncrementOpsAppliedStatsFn = stdx::function<void()>;
/**
@@ -110,7 +110,7 @@ using IncrementOpsAppliedStatsFn = stdx::function<void()>;
* @param incrementOpsAppliedStats is called whenever an op is applied.
* Returns failure status if the op was an update that could not be applied.
*/
-Status applyOperation_inlock(OperationContext* txn,
+Status applyOperation_inlock(OperationContext* opCtx,
Database* db,
const BSONObj& op,
bool inSteadyStateReplication = false,
@@ -123,17 +123,19 @@ Status applyOperation_inlock(OperationContext* txn,
* initial sync.
* Returns failure status if the op that could not be applied.
*/
-Status applyCommand_inlock(OperationContext* txn, const BSONObj& op, bool inSteadyStateReplication);
+Status applyCommand_inlock(OperationContext* opCtx,
+ const BSONObj& op,
+ bool inSteadyStateReplication);
/**
* Initializes the global Timestamp with the value from the timestamp of the last oplog entry.
*/
-void initTimestampFromOplog(OperationContext* txn, const std::string& oplogNS);
+void initTimestampFromOplog(OperationContext* opCtx, const std::string& oplogNS);
/**
* Sets the global Timestamp to be 'newTime'.
*/
-void setNewTimestamp(ServiceContext* txn, const Timestamp& newTime);
+void setNewTimestamp(ServiceContext* opCtx, const Timestamp& newTime);
/**
* Detects the current replication mode and sets the "_oplogCollectionName" accordingly.
diff --git a/src/mongo/db/repl/oplog_buffer.h b/src/mongo/db/repl/oplog_buffer.h
index 9695260c691..f177808a991 100644
--- a/src/mongo/db/repl/oplog_buffer.h
+++ b/src/mongo/db/repl/oplog_buffer.h
@@ -70,7 +70,7 @@ public:
* create backing storage, etc). This method may be called at most once for the lifetime of an
* oplog buffer.
*/
- virtual void startup(OperationContext* txn) = 0;
+ virtual void startup(OperationContext* opCtx) = 0;
/**
* Signals to the oplog buffer that it should shut down. This method may block. After
@@ -79,7 +79,7 @@ public:
* It is legal to call this method multiple times, but it should only be called after startup
* has been called.
*/
- virtual void shutdown(OperationContext* txn) = 0;
+ virtual void shutdown(OperationContext* opCtx) = 0;
/**
* Pushes operation into oplog buffer, ignoring any size constraints. Does not block.
@@ -87,26 +87,26 @@ public:
* the limit returned by getMaxSize() but should not otherwise adversely affect normal
* functionality such as pushing and popping operations from the oplog buffer.
*/
- virtual void pushEvenIfFull(OperationContext* txn, const Value& value) = 0;
+ virtual void pushEvenIfFull(OperationContext* opCtx, const Value& value) = 0;
/**
* Pushes operation into oplog buffer.
* If there are size constraints on the oplog buffer, this may block until sufficient space
* is made available (by popping) to complete this operation.
*/
- virtual void push(OperationContext* txn, const Value& value) = 0;
+ virtual void push(OperationContext* opCtx, const Value& value) = 0;
/**
* Pushes operations in the iterator range [begin, end) into the oplog buffer without blocking.
*/
- virtual void pushAllNonBlocking(OperationContext* txn,
+ virtual void pushAllNonBlocking(OperationContext* opCtx,
Batch::const_iterator begin,
Batch::const_iterator end) = 0;
/**
* Returns when enough space is available.
*/
- virtual void waitForSpace(OperationContext* txn, std::size_t size) = 0;
+ virtual void waitForSpace(OperationContext* opCtx, std::size_t size) = 0;
/**
* Returns true if oplog buffer is empty.
@@ -135,13 +135,13 @@ public:
/**
* Clears oplog buffer.
*/
- virtual void clear(OperationContext* txn) = 0;
+ virtual void clear(OperationContext* opCtx) = 0;
/**
* Returns false if oplog buffer is empty. "value" is left unchanged.
* Otherwise, removes last item (saves in "value") from the oplog buffer and returns true.
*/
- virtual bool tryPop(OperationContext* txn, Value* value) = 0;
+ virtual bool tryPop(OperationContext* opCtx, Value* value) = 0;
/**
* Waits "waitDuration" for an operation to be pushed into the oplog buffer.
@@ -154,12 +154,12 @@ public:
* Returns false if oplog buffer is empty.
* Otherwise, returns true and sets "value" to last item in oplog buffer.
*/
- virtual bool peek(OperationContext* txn, Value* value) = 0;
+ virtual bool peek(OperationContext* opCtx, Value* value) = 0;
/**
* Returns the item most recently added to the oplog buffer or nothing if the buffer is empty.
*/
- virtual boost::optional<Value> lastObjectPushed(OperationContext* txn) const = 0;
+ virtual boost::optional<Value> lastObjectPushed(OperationContext* opCtx) const = 0;
};
} // namespace repl
diff --git a/src/mongo/db/repl/oplog_buffer_blocking_queue.cpp b/src/mongo/db/repl/oplog_buffer_blocking_queue.cpp
index 9b9cdb82dac..72eb401ddb5 100644
--- a/src/mongo/db/repl/oplog_buffer_blocking_queue.cpp
+++ b/src/mongo/db/repl/oplog_buffer_blocking_queue.cpp
@@ -49,8 +49,8 @@ OplogBufferBlockingQueue::OplogBufferBlockingQueue() : _queue(kOplogBufferSize,
void OplogBufferBlockingQueue::startup(OperationContext*) {}
-void OplogBufferBlockingQueue::shutdown(OperationContext* txn) {
- clear(txn);
+void OplogBufferBlockingQueue::shutdown(OperationContext* opCtx) {
+ clear(opCtx);
}
void OplogBufferBlockingQueue::pushEvenIfFull(OperationContext*, const Value& value) {
diff --git a/src/mongo/db/repl/oplog_buffer_blocking_queue.h b/src/mongo/db/repl/oplog_buffer_blocking_queue.h
index b0fa36a8157..68c74779b0e 100644
--- a/src/mongo/db/repl/oplog_buffer_blocking_queue.h
+++ b/src/mongo/db/repl/oplog_buffer_blocking_queue.h
@@ -41,23 +41,23 @@ class OplogBufferBlockingQueue final : public OplogBuffer {
public:
OplogBufferBlockingQueue();
- void startup(OperationContext* txn) override;
- void shutdown(OperationContext* txn) override;
- void pushEvenIfFull(OperationContext* txn, const Value& value) override;
- void push(OperationContext* txn, const Value& value) override;
- void pushAllNonBlocking(OperationContext* txn,
+ void startup(OperationContext* opCtx) override;
+ void shutdown(OperationContext* opCtx) override;
+ void pushEvenIfFull(OperationContext* opCtx, const Value& value) override;
+ void push(OperationContext* opCtx, const Value& value) override;
+ void pushAllNonBlocking(OperationContext* opCtx,
Batch::const_iterator begin,
Batch::const_iterator end) override;
- void waitForSpace(OperationContext* txn, std::size_t size) override;
+ void waitForSpace(OperationContext* opCtx, std::size_t size) override;
bool isEmpty() const override;
std::size_t getMaxSize() const override;
std::size_t getSize() const override;
std::size_t getCount() const override;
- void clear(OperationContext* txn) override;
- bool tryPop(OperationContext* txn, Value* value) override;
+ void clear(OperationContext* opCtx) override;
+ bool tryPop(OperationContext* opCtx, Value* value) override;
bool waitForData(Seconds waitDuration) override;
- bool peek(OperationContext* txn, Value* value) override;
- boost::optional<Value> lastObjectPushed(OperationContext* txn) const override;
+ bool peek(OperationContext* opCtx, Value* value) override;
+ boost::optional<Value> lastObjectPushed(OperationContext* opCtx) const override;
private:
BlockingQueue<BSONObj> _queue;
diff --git a/src/mongo/db/repl/oplog_buffer_collection.cpp b/src/mongo/db/repl/oplog_buffer_collection.cpp
index 1c74963cc14..f738312a3eb 100644
--- a/src/mongo/db/repl/oplog_buffer_collection.cpp
+++ b/src/mongo/db/repl/oplog_buffer_collection.cpp
@@ -93,27 +93,27 @@ OplogBufferCollection::Options OplogBufferCollection::getOptions() const {
return _options;
}
-void OplogBufferCollection::startup(OperationContext* txn) {
- clear(txn);
+void OplogBufferCollection::startup(OperationContext* opCtx) {
+ clear(opCtx);
}
-void OplogBufferCollection::shutdown(OperationContext* txn) {
+void OplogBufferCollection::shutdown(OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
- _dropCollection(txn);
+ _dropCollection(opCtx);
_size = 0;
_count = 0;
}
-void OplogBufferCollection::pushEvenIfFull(OperationContext* txn, const Value& value) {
+void OplogBufferCollection::pushEvenIfFull(OperationContext* opCtx, const Value& value) {
Batch valueBatch = {value};
- pushAllNonBlocking(txn, valueBatch.begin(), valueBatch.end());
+ pushAllNonBlocking(opCtx, valueBatch.begin(), valueBatch.end());
}
-void OplogBufferCollection::push(OperationContext* txn, const Value& value) {
- pushEvenIfFull(txn, value);
+void OplogBufferCollection::push(OperationContext* opCtx, const Value& value) {
+ pushEvenIfFull(opCtx, value);
}
-void OplogBufferCollection::pushAllNonBlocking(OperationContext* txn,
+void OplogBufferCollection::pushAllNonBlocking(OperationContext* opCtx,
Batch::const_iterator begin,
Batch::const_iterator end) {
if (begin == end) {
@@ -132,7 +132,7 @@ void OplogBufferCollection::pushAllNonBlocking(OperationContext* txn,
return doc;
});
- auto status = _storageInterface->insertDocuments(txn, _nss, docsToInsert);
+ auto status = _storageInterface->insertDocuments(opCtx, _nss, docsToInsert);
fassertStatusOK(40161, status);
_lastPushedTimestamp = ts;
@@ -144,7 +144,7 @@ void OplogBufferCollection::pushAllNonBlocking(OperationContext* txn,
_cvNoLongerEmpty.notify_all();
}
-void OplogBufferCollection::waitForSpace(OperationContext* txn, std::size_t size) {}
+void OplogBufferCollection::waitForSpace(OperationContext* opCtx, std::size_t size) {}
bool OplogBufferCollection::isEmpty() const {
stdx::lock_guard<stdx::mutex> lk(_mutex);
@@ -165,10 +165,10 @@ std::size_t OplogBufferCollection::getCount() const {
return _count;
}
-void OplogBufferCollection::clear(OperationContext* txn) {
+void OplogBufferCollection::clear(OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
- _dropCollection(txn);
- _createCollection(txn);
+ _dropCollection(opCtx);
+ _createCollection(opCtx);
_size = 0;
_count = 0;
_sentinelCount = 0;
@@ -177,12 +177,12 @@ void OplogBufferCollection::clear(OperationContext* txn) {
_peekCache = std::queue<BSONObj>();
}
-bool OplogBufferCollection::tryPop(OperationContext* txn, Value* value) {
+bool OplogBufferCollection::tryPop(OperationContext* opCtx, Value* value) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_count == 0) {
return false;
}
- return _pop_inlock(txn, value);
+ return _pop_inlock(opCtx, value);
}
bool OplogBufferCollection::waitForData(Seconds waitDuration) {
@@ -194,24 +194,24 @@ bool OplogBufferCollection::waitForData(Seconds waitDuration) {
return _count != 0;
}
-bool OplogBufferCollection::peek(OperationContext* txn, Value* value) {
+bool OplogBufferCollection::peek(OperationContext* opCtx, Value* value) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_count == 0) {
return false;
}
- *value = _peek_inlock(txn, PeekMode::kExtractEmbeddedDocument);
+ *value = _peek_inlock(opCtx, PeekMode::kExtractEmbeddedDocument);
return true;
}
boost::optional<OplogBuffer::Value> OplogBufferCollection::lastObjectPushed(
- OperationContext* txn) const {
+ OperationContext* opCtx) const {
stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_count == 0) {
return boost::none;
}
const auto docs =
fassertStatusOK(40348,
- _storageInterface->findDocuments(txn,
+ _storageInterface->findDocuments(opCtx,
_nss,
kIdIdxName,
StorageInterface::ScanDirection::kBackward,
@@ -222,9 +222,9 @@ boost::optional<OplogBuffer::Value> OplogBufferCollection::lastObjectPushed(
return extractEmbeddedOplogDocument(docs.front()).getOwned();
}
-bool OplogBufferCollection::_pop_inlock(OperationContext* txn, Value* value) {
+bool OplogBufferCollection::_pop_inlock(OperationContext* opCtx, Value* value) {
BSONObj docFromCollection =
- _peek_inlock(txn, PeekMode::kReturnUnmodifiedDocumentFromCollection);
+ _peek_inlock(opCtx, PeekMode::kReturnUnmodifiedDocumentFromCollection);
_lastPoppedKey = docFromCollection["_id"].wrap("");
*value = extractEmbeddedOplogDocument(docFromCollection).getOwned();
@@ -239,7 +239,7 @@ bool OplogBufferCollection::_pop_inlock(OperationContext* txn, Value* value) {
return true;
}
-BSONObj OplogBufferCollection::_peek_inlock(OperationContext* txn, PeekMode peekMode) {
+BSONObj OplogBufferCollection::_peek_inlock(OperationContext* opCtx, PeekMode peekMode) {
invariant(_count > 0);
BSONObj startKey;
@@ -259,7 +259,7 @@ BSONObj OplogBufferCollection::_peek_inlock(OperationContext* txn, PeekMode peek
std::size_t limit = isPeekCacheEnabled ? _options.peekCacheSize : 1U;
const auto docs = fassertStatusOK(
40163,
- _storageInterface->findDocuments(txn,
+ _storageInterface->findDocuments(opCtx,
_nss,
kIdIdxName,
StorageInterface::ScanDirection::kForward,
@@ -286,14 +286,14 @@ BSONObj OplogBufferCollection::_peek_inlock(OperationContext* txn, PeekMode peek
MONGO_UNREACHABLE;
}
-void OplogBufferCollection::_createCollection(OperationContext* txn) {
+void OplogBufferCollection::_createCollection(OperationContext* opCtx) {
CollectionOptions options;
options.temp = true;
- fassert(40154, _storageInterface->createCollection(txn, _nss, options));
+ fassert(40154, _storageInterface->createCollection(opCtx, _nss, options));
}
-void OplogBufferCollection::_dropCollection(OperationContext* txn) {
- fassert(40155, _storageInterface->dropCollection(txn, _nss));
+void OplogBufferCollection::_dropCollection(OperationContext* opCtx) {
+ fassert(40155, _storageInterface->dropCollection(opCtx, _nss));
}
std::size_t OplogBufferCollection::getSentinelCount_forTest() const {
diff --git a/src/mongo/db/repl/oplog_buffer_collection.h b/src/mongo/db/repl/oplog_buffer_collection.h
index cb4cbcac8b8..1636b5bc7b8 100644
--- a/src/mongo/db/repl/oplog_buffer_collection.h
+++ b/src/mongo/db/repl/oplog_buffer_collection.h
@@ -109,27 +109,27 @@ public:
*/
Options getOptions() const;
- void startup(OperationContext* txn) override;
- void shutdown(OperationContext* txn) override;
- void pushEvenIfFull(OperationContext* txn, const Value& value) override;
- void push(OperationContext* txn, const Value& value) override;
+ void startup(OperationContext* opCtx) override;
+ void shutdown(OperationContext* opCtx) override;
+ void pushEvenIfFull(OperationContext* opCtx, const Value& value) override;
+ void push(OperationContext* opCtx, const Value& value) override;
/**
* Pushing documents with 'pushAllNonBlocking' will not handle sentinel documents properly. If
* pushing sentinel documents is required, use 'push' or 'pushEvenIfFull'.
*/
- void pushAllNonBlocking(OperationContext* txn,
+ void pushAllNonBlocking(OperationContext* opCtx,
Batch::const_iterator begin,
Batch::const_iterator end) override;
- void waitForSpace(OperationContext* txn, std::size_t size) override;
+ void waitForSpace(OperationContext* opCtx, std::size_t size) override;
bool isEmpty() const override;
std::size_t getMaxSize() const override;
std::size_t getSize() const override;
std::size_t getCount() const override;
- void clear(OperationContext* txn) override;
- bool tryPop(OperationContext* txn, Value* value) override;
+ void clear(OperationContext* opCtx) override;
+ bool tryPop(OperationContext* opCtx, Value* value) override;
bool waitForData(Seconds waitDuration) override;
- bool peek(OperationContext* txn, Value* value) override;
- boost::optional<Value> lastObjectPushed(OperationContext* txn) const override;
+ bool peek(OperationContext* opCtx, Value* value) override;
+ boost::optional<Value> lastObjectPushed(OperationContext* opCtx) const override;
// ---- Testing API ----
std::size_t getSentinelCount_forTest() const;
@@ -141,19 +141,19 @@ private:
/*
* Creates a temporary collection with the _nss namespace.
*/
- void _createCollection(OperationContext* txn);
+ void _createCollection(OperationContext* opCtx);
/*
* Drops the collection with the _nss namespace.
*/
- void _dropCollection(OperationContext* txn);
+ void _dropCollection(OperationContext* opCtx);
enum class PeekMode { kExtractEmbeddedDocument, kReturnUnmodifiedDocumentFromCollection };
/**
* Returns the oldest oplog entry in the buffer.
* Assumes the buffer is not empty.
*/
- BSONObj _peek_inlock(OperationContext* txn, PeekMode peekMode);
+ BSONObj _peek_inlock(OperationContext* opCtx, PeekMode peekMode);
// Storage interface used to perform storage engine level functions on the collection.
StorageInterface* _storageInterface;
@@ -161,7 +161,7 @@ private:
/**
* Pops an entry off the buffer in a lock.
*/
- bool _pop_inlock(OperationContext* txn, Value* value);
+ bool _pop_inlock(OperationContext* opCtx, Value* value);
// The namespace for the oplog buffer collection.
const NamespaceString _nss;
diff --git a/src/mongo/db/repl/oplog_buffer_collection_test.cpp b/src/mongo/db/repl/oplog_buffer_collection_test.cpp
index ced9ab1f495..1ee2c3ccc8c 100644
--- a/src/mongo/db/repl/oplog_buffer_collection_test.cpp
+++ b/src/mongo/db/repl/oplog_buffer_collection_test.cpp
@@ -60,7 +60,7 @@ protected:
ServiceContext::UniqueOperationContext makeOperationContext() const;
StorageInterface* _storageInterface = nullptr;
- ServiceContext::UniqueOperationContext _txn;
+ ServiceContext::UniqueOperationContext _opCtx;
private:
void setUp() override;
@@ -82,11 +82,11 @@ void OplogBufferCollectionTest::setUp() {
_storageInterface = storageInterface.get();
StorageInterface::set(service, std::move(storageInterface));
- _txn = makeOperationContext();
+ _opCtx = makeOperationContext();
}
void OplogBufferCollectionTest::tearDown() {
- _txn.reset();
+ _opCtx.reset();
_storageInterface = nullptr;
ServiceContextMongoDTest::tearDown();
}
@@ -131,50 +131,50 @@ TEST_F(OplogBufferCollectionTest, GetNamespace) {
ASSERT_EQUALS(nss, OplogBufferCollection(_storageInterface, nss).getNamespace());
}
-void testStartupCreatesCollection(OperationContext* txn,
+void testStartupCreatesCollection(OperationContext* opCtx,
StorageInterface* storageInterface,
const NamespaceString& nss) {
OplogBufferCollection oplogBuffer(storageInterface, nss);
// Collection should not exist until startup() is called.
- ASSERT_FALSE(AutoGetCollectionForRead(txn, nss).getCollection());
+ ASSERT_FALSE(AutoGetCollectionForRead(opCtx, nss).getCollection());
- oplogBuffer.startup(txn);
- ASSERT_TRUE(AutoGetCollectionForRead(txn, nss).getCollection());
+ oplogBuffer.startup(opCtx);
+ ASSERT_TRUE(AutoGetCollectionForRead(opCtx, nss).getCollection());
}
TEST_F(OplogBufferCollectionTest, StartupWithDefaultNamespaceCreatesCollection) {
auto nss = OplogBufferCollection::getDefaultNamespace();
ASSERT_FALSE(nss.isOplog());
- testStartupCreatesCollection(_txn.get(), _storageInterface, nss);
+ testStartupCreatesCollection(_opCtx.get(), _storageInterface, nss);
}
TEST_F(OplogBufferCollectionTest, StartupWithUserProvidedNamespaceCreatesCollection) {
- testStartupCreatesCollection(_txn.get(), _storageInterface, makeNamespace(_agent));
+ testStartupCreatesCollection(_opCtx.get(), _storageInterface, makeNamespace(_agent));
}
TEST_F(OplogBufferCollectionTest, StartupDropsExistingCollectionBeforeCreatingNewCollection) {
auto nss = makeNamespace(_agent);
- ASSERT_OK(_storageInterface->createCollection(_txn.get(), nss, CollectionOptions()));
+ ASSERT_OK(_storageInterface->createCollection(_opCtx.get(), nss, CollectionOptions()));
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
- ASSERT_TRUE(AutoGetCollectionForRead(_txn.get(), nss).getCollection());
+ oplogBuffer.startup(_opCtx.get());
+ ASSERT_TRUE(AutoGetCollectionForRead(_opCtx.get(), nss).getCollection());
}
DEATH_TEST_F(OplogBufferCollectionTest,
StartupWithOplogNamespaceTriggersFatalAssertion,
"Fatal assertion 40154 Location28838: cannot create a non-capped oplog collection") {
- testStartupCreatesCollection(_txn.get(), _storageInterface, NamespaceString("local.oplog.Z"));
+ testStartupCreatesCollection(_opCtx.get(), _storageInterface, NamespaceString("local.oplog.Z"));
}
TEST_F(OplogBufferCollectionTest, ShutdownDropsCollection) {
auto nss = makeNamespace(_agent);
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
- ASSERT_TRUE(AutoGetCollectionForRead(_txn.get(), nss).getCollection());
- oplogBuffer.shutdown(_txn.get());
- ASSERT_FALSE(AutoGetCollectionForRead(_txn.get(), nss).getCollection());
+ oplogBuffer.startup(_opCtx.get());
+ ASSERT_TRUE(AutoGetCollectionForRead(_opCtx.get(), nss).getCollection());
+ oplogBuffer.shutdown(_opCtx.get());
+ ASSERT_FALSE(AutoGetCollectionForRead(_opCtx.get(), nss).getCollection());
}
TEST_F(OplogBufferCollectionTest, extractEmbeddedOplogDocumentChangesIdToTimestamp) {
@@ -250,7 +250,7 @@ DEATH_TEST_F(OplogBufferCollectionTest,
/**
* Check collection contents. OplogInterface returns documents in reverse natural order.
*/
-void _assertDocumentsInCollectionEquals(OperationContext* txn,
+void _assertDocumentsInCollectionEquals(OperationContext* opCtx,
const NamespaceString& nss,
const std::vector<BSONObj>& docs) {
std::vector<BSONObj> reversedTransformedDocs;
@@ -269,7 +269,7 @@ void _assertDocumentsInCollectionEquals(OperationContext* txn,
ASSERT_GT(ts, previousTimestamp);
}
std::reverse(reversedTransformedDocs.begin(), reversedTransformedDocs.end());
- OplogInterfaceLocal oplog(txn, nss.ns());
+ OplogInterfaceLocal oplog(opCtx, nss.ns());
auto iter = oplog.makeIterator();
for (const auto& doc : reversedTransformedDocs) {
ASSERT_BSONOBJ_EQ(doc, unittest::assertGet(iter->next()).first);
@@ -281,13 +281,13 @@ TEST_F(OplogBufferCollectionTest, PushOneDocumentWithPushAllNonBlockingAddsDocum
auto nss = makeNamespace(_agent);
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
+ oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {makeOplogEntry(1)};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
- oplogBuffer.pushAllNonBlocking(_txn.get(), oplog.begin(), oplog.end());
+ oplogBuffer.pushAllNonBlocking(_opCtx.get(), oplog.begin(), oplog.end());
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
- _assertDocumentsInCollectionEquals(_txn.get(), nss, oplog);
+ _assertDocumentsInCollectionEquals(_opCtx.get(), nss, oplog);
}
TEST_F(OplogBufferCollectionTest,
@@ -295,7 +295,7 @@ TEST_F(OplogBufferCollectionTest,
auto nss = makeNamespace(_agent);
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
+ oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> emptyOplogEntries;
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
oplogBuffer.pushAllNonBlocking(nullptr, emptyOplogEntries.begin(), emptyOplogEntries.end());
@@ -306,163 +306,163 @@ TEST_F(OplogBufferCollectionTest, PushOneDocumentWithPushAddsDocument) {
auto nss = makeNamespace(_agent);
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
+ oplogBuffer.startup(_opCtx.get());
BSONObj oplog = makeOplogEntry(1);
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
- oplogBuffer.push(_txn.get(), oplog);
+ oplogBuffer.push(_opCtx.get(), oplog);
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
- _assertDocumentsInCollectionEquals(_txn.get(), nss, {oplog});
+ _assertDocumentsInCollectionEquals(_opCtx.get(), nss, {oplog});
}
TEST_F(OplogBufferCollectionTest, PushOneDocumentWithPushEvenIfFullAddsDocument) {
auto nss = makeNamespace(_agent);
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
+ oplogBuffer.startup(_opCtx.get());
BSONObj oplog = makeOplogEntry(1);
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
- oplogBuffer.pushEvenIfFull(_txn.get(), oplog);
+ oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog);
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
ASSERT_EQUALS(0UL, oplogBuffer.getSentinelCount_forTest());
- _assertDocumentsInCollectionEquals(_txn.get(), nss, {oplog});
+ _assertDocumentsInCollectionEquals(_opCtx.get(), nss, {oplog});
}
TEST_F(OplogBufferCollectionTest, PeekDoesNotRemoveDocument) {
auto nss = makeNamespace(_agent);
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
+ oplogBuffer.startup(_opCtx.get());
BSONObj oplog1 = makeOplogEntry(1);
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
- oplogBuffer.push(_txn.get(), oplog1);
+ oplogBuffer.push(_opCtx.get(), oplog1);
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
// _peekOneSide should provide correct bound inclusion to storage engine when collection has one
// document.
BSONObj doc;
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog1);
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
BSONObj oplog2 = makeOplogEntry(2);
- oplogBuffer.push(_txn.get(), oplog2);
+ oplogBuffer.push(_opCtx.get(), oplog2);
ASSERT_EQUALS(oplogBuffer.getCount(), 2UL);
// _peekOneSide should return same result after adding new oplog entry.
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog1);
ASSERT_EQUALS(oplogBuffer.getCount(), 2UL);
- _assertDocumentsInCollectionEquals(_txn.get(), nss, {oplog1, oplog2});
+ _assertDocumentsInCollectionEquals(_opCtx.get(), nss, {oplog1, oplog2});
}
TEST_F(OplogBufferCollectionTest, PeekWithNoDocumentsReturnsFalse) {
auto nss = makeNamespace(_agent);
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
+ oplogBuffer.startup(_opCtx.get());
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
BSONObj doc;
- ASSERT_FALSE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_FALSE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_TRUE(doc.isEmpty());
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
- _assertDocumentsInCollectionEquals(_txn.get(), nss, {});
+ _assertDocumentsInCollectionEquals(_opCtx.get(), nss, {});
}
TEST_F(OplogBufferCollectionTest, PopDoesNotRemoveDocumentFromCollection) {
auto nss = makeNamespace(_agent);
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
+ oplogBuffer.startup(_opCtx.get());
BSONObj oplog = makeOplogEntry(1);
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
- oplogBuffer.push(_txn.get(), oplog);
+ oplogBuffer.push(_opCtx.get(), oplog);
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
BSONObj doc;
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog);
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
- _assertDocumentsInCollectionEquals(_txn.get(), nss, {oplog});
+ _assertDocumentsInCollectionEquals(_opCtx.get(), nss, {oplog});
}
TEST_F(OplogBufferCollectionTest, PopWithNoDocumentsReturnsFalse) {
auto nss = makeNamespace(_agent);
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
+ oplogBuffer.startup(_opCtx.get());
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
BSONObj doc;
- ASSERT_FALSE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_FALSE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_TRUE(doc.isEmpty());
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
- _assertDocumentsInCollectionEquals(_txn.get(), nss, {});
+ _assertDocumentsInCollectionEquals(_opCtx.get(), nss, {});
}
TEST_F(OplogBufferCollectionTest, PopAndPeekReturnDocumentsInOrder) {
auto nss = makeNamespace(_agent);
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
+ oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {
makeOplogEntry(1), makeOplogEntry(2), makeOplogEntry(3),
};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
- oplogBuffer.pushAllNonBlocking(_txn.get(), oplog.begin(), oplog.end());
+ oplogBuffer.pushAllNonBlocking(_opCtx.get(), oplog.begin(), oplog.end());
ASSERT_EQUALS(oplogBuffer.getCount(), 3UL);
- _assertDocumentsInCollectionEquals(_txn.get(), nss, oplog);
+ _assertDocumentsInCollectionEquals(_opCtx.get(), nss, oplog);
BSONObj doc;
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog[0]);
ASSERT_EQUALS(oplogBuffer.getCount(), 3UL);
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog[0]);
ASSERT_EQUALS(oplogBuffer.getCount(), 2UL);
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog[1]);
ASSERT_EQUALS(oplogBuffer.getCount(), 2UL);
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog[1]);
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog[2]);
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog[2]);
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
// tryPop does not remove documents from collection.
- _assertDocumentsInCollectionEquals(_txn.get(), nss, oplog);
+ _assertDocumentsInCollectionEquals(_opCtx.get(), nss, oplog);
}
TEST_F(OplogBufferCollectionTest, LastObjectPushedReturnsNewestOplogEntry) {
auto nss = makeNamespace(_agent);
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
+ oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {
makeOplogEntry(1), makeOplogEntry(2), makeOplogEntry(3),
};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
- oplogBuffer.pushAllNonBlocking(_txn.get(), oplog.begin(), oplog.end());
+ oplogBuffer.pushAllNonBlocking(_opCtx.get(), oplog.begin(), oplog.end());
ASSERT_EQUALS(oplogBuffer.getCount(), 3UL);
- auto doc = oplogBuffer.lastObjectPushed(_txn.get());
+ auto doc = oplogBuffer.lastObjectPushed(_opCtx.get());
ASSERT_BSONOBJ_EQ(*doc, oplog[2]);
ASSERT_EQUALS(oplogBuffer.getCount(), 3UL);
}
@@ -471,9 +471,9 @@ TEST_F(OplogBufferCollectionTest, LastObjectPushedReturnsNoneWithNoEntries) {
auto nss = makeNamespace(_agent);
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
+ oplogBuffer.startup(_opCtx.get());
- auto doc = oplogBuffer.lastObjectPushed(_txn.get());
+ auto doc = oplogBuffer.lastObjectPushed(_opCtx.get());
ASSERT_EQUALS(doc, boost::none);
}
@@ -481,10 +481,10 @@ TEST_F(OplogBufferCollectionTest, IsEmptyReturnsTrueWhenEmptyAndFalseWhenNot) {
auto nss = makeNamespace(_agent);
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
+ oplogBuffer.startup(_opCtx.get());
BSONObj oplog = makeOplogEntry(1);
ASSERT_TRUE(oplogBuffer.isEmpty());
- oplogBuffer.pushEvenIfFull(_txn.get(), oplog);
+ oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog);
ASSERT_FALSE(oplogBuffer.isEmpty());
}
@@ -492,7 +492,7 @@ TEST_F(OplogBufferCollectionTest, ClearClearsCollection) {
auto nss = makeNamespace(_agent);
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
+ oplogBuffer.startup(_opCtx.get());
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
ASSERT_EQUALS(oplogBuffer.getSize(), 0UL);
ASSERT_EQUALS(0U, oplogBuffer.getSentinelCount_forTest());
@@ -500,27 +500,27 @@ TEST_F(OplogBufferCollectionTest, ClearClearsCollection) {
ASSERT_EQUALS(Timestamp(), oplogBuffer.getLastPoppedTimestamp_forTest());
BSONObj oplog = makeOplogEntry(1);
- oplogBuffer.push(_txn.get(), oplog);
+ oplogBuffer.push(_opCtx.get(), oplog);
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
ASSERT_EQUALS(oplogBuffer.getSize(), std::size_t(oplog.objsize()));
ASSERT_EQUALS(0U, oplogBuffer.getSentinelCount_forTest());
ASSERT_EQUALS(oplog["ts"].timestamp(), oplogBuffer.getLastPushedTimestamp_forTest());
ASSERT_EQUALS(Timestamp(), oplogBuffer.getLastPoppedTimestamp_forTest());
- _assertDocumentsInCollectionEquals(_txn.get(), nss, {oplog});
+ _assertDocumentsInCollectionEquals(_opCtx.get(), nss, {oplog});
BSONObj sentinel;
- oplogBuffer.push(_txn.get(), sentinel);
+ oplogBuffer.push(_opCtx.get(), sentinel);
ASSERT_EQUALS(oplogBuffer.getCount(), 2UL);
ASSERT_EQUALS(oplogBuffer.getSize(), std::size_t(oplog.objsize() + BSONObj().objsize()));
ASSERT_EQUALS(1U, oplogBuffer.getSentinelCount_forTest());
ASSERT_EQUALS(oplog["ts"].timestamp(), oplogBuffer.getLastPushedTimestamp_forTest());
ASSERT_EQUALS(Timestamp(), oplogBuffer.getLastPoppedTimestamp_forTest());
- _assertDocumentsInCollectionEquals(_txn.get(), nss, {oplog, sentinel});
+ _assertDocumentsInCollectionEquals(_opCtx.get(), nss, {oplog, sentinel});
BSONObj oplog2 = makeOplogEntry(2);
- oplogBuffer.push(_txn.get(), oplog2);
+ oplogBuffer.push(_opCtx.get(), oplog2);
ASSERT_EQUALS(oplogBuffer.getCount(), 3UL);
ASSERT_EQUALS(oplogBuffer.getSize(),
std::size_t(oplog.objsize() + BSONObj().objsize() + oplog2.objsize()));
@@ -528,10 +528,10 @@ TEST_F(OplogBufferCollectionTest, ClearClearsCollection) {
ASSERT_EQUALS(oplog2["ts"].timestamp(), oplogBuffer.getLastPushedTimestamp_forTest());
ASSERT_EQUALS(Timestamp(), oplogBuffer.getLastPoppedTimestamp_forTest());
- _assertDocumentsInCollectionEquals(_txn.get(), nss, {oplog, sentinel, oplog2});
+ _assertDocumentsInCollectionEquals(_opCtx.get(), nss, {oplog, sentinel, oplog2});
BSONObj poppedDoc;
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &poppedDoc));
+ ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &poppedDoc));
ASSERT_BSONOBJ_EQ(oplog, poppedDoc);
ASSERT_EQUALS(oplogBuffer.getCount(), 2UL);
ASSERT_EQUALS(oplogBuffer.getSize(), std::size_t(BSONObj().objsize() + oplog2.objsize()));
@@ -539,29 +539,29 @@ TEST_F(OplogBufferCollectionTest, ClearClearsCollection) {
ASSERT_EQUALS(oplog2["ts"].timestamp(), oplogBuffer.getLastPushedTimestamp_forTest());
ASSERT_EQUALS(oplog["ts"].timestamp(), oplogBuffer.getLastPoppedTimestamp_forTest());
- _assertDocumentsInCollectionEquals(_txn.get(), nss, {oplog, sentinel, oplog2});
+ _assertDocumentsInCollectionEquals(_opCtx.get(), nss, {oplog, sentinel, oplog2});
- oplogBuffer.clear(_txn.get());
- ASSERT_TRUE(AutoGetCollectionForRead(_txn.get(), nss).getCollection());
+ oplogBuffer.clear(_opCtx.get());
+ ASSERT_TRUE(AutoGetCollectionForRead(_opCtx.get(), nss).getCollection());
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
ASSERT_EQUALS(oplogBuffer.getSize(), 0UL);
ASSERT_EQUALS(0U, oplogBuffer.getSentinelCount_forTest());
ASSERT_EQUALS(Timestamp(), oplogBuffer.getLastPushedTimestamp_forTest());
ASSERT_EQUALS(Timestamp(), oplogBuffer.getLastPoppedTimestamp_forTest());
- _assertDocumentsInCollectionEquals(_txn.get(), nss, {});
+ _assertDocumentsInCollectionEquals(_opCtx.get(), nss, {});
BSONObj doc;
- ASSERT_FALSE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_FALSE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_TRUE(doc.isEmpty());
- ASSERT_FALSE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_FALSE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_TRUE(doc.isEmpty());
}
TEST_F(OplogBufferCollectionTest, WaitForDataBlocksAndFindsDocument) {
auto nss = makeNamespace(_agent);
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
+ oplogBuffer.startup(_opCtx.get());
unittest::Barrier barrier(2U);
BSONObj oplog = makeOplogEntry(1);
@@ -578,11 +578,11 @@ TEST_F(OplogBufferCollectionTest, WaitForDataBlocksAndFindsDocument) {
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
barrier.countDownAndWait();
- oplogBuffer.push(_txn.get(), oplog);
+ oplogBuffer.push(_opCtx.get(), oplog);
peekingThread.join();
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
ASSERT_TRUE(success);
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog);
ASSERT_EQUALS(count, 1UL);
}
@@ -590,7 +590,7 @@ TEST_F(OplogBufferCollectionTest, WaitForDataBlocksAndFindsDocument) {
TEST_F(OplogBufferCollectionTest, TwoWaitForDataInvocationsBlockAndFindSameDocument) {
auto nss = makeNamespace(_agent);
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
+ oplogBuffer.startup(_opCtx.get());
unittest::Barrier barrier(3U);
BSONObj oplog = makeOplogEntry(1);
@@ -616,13 +616,13 @@ TEST_F(OplogBufferCollectionTest, TwoWaitForDataInvocationsBlockAndFindSameDocum
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
barrier.countDownAndWait();
- oplogBuffer.push(_txn.get(), oplog);
+ oplogBuffer.push(_opCtx.get(), oplog);
peekingThread1.join();
peekingThread2.join();
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
ASSERT_TRUE(success1);
BSONObj doc;
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog);
ASSERT_EQUALS(count1, 1UL);
ASSERT_TRUE(success2);
@@ -632,7 +632,7 @@ TEST_F(OplogBufferCollectionTest, TwoWaitForDataInvocationsBlockAndFindSameDocum
TEST_F(OplogBufferCollectionTest, WaitForDataBlocksAndTimesOutWhenItDoesNotFindDocument) {
auto nss = makeNamespace(_agent);
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
+ oplogBuffer.startup(_opCtx.get());
BSONObj doc;
bool success = false;
@@ -648,95 +648,95 @@ TEST_F(OplogBufferCollectionTest, WaitForDataBlocksAndTimesOutWhenItDoesNotFindD
peekingThread.join();
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
ASSERT_FALSE(success);
- ASSERT_FALSE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_FALSE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_TRUE(doc.isEmpty());
ASSERT_EQUALS(count, 0UL);
}
void _testPushSentinelsProperly(
- OperationContext* txn,
+ OperationContext* opCtx,
const NamespaceString& nss,
StorageInterface* storageInterface,
- stdx::function<void(OperationContext* txn,
+ stdx::function<void(OperationContext* opCtx,
OplogBufferCollection* oplogBuffer,
const std::vector<BSONObj>& oplog)> pushDocsFn) {
OplogBufferCollection oplogBuffer(storageInterface, nss);
- oplogBuffer.startup(txn);
+ oplogBuffer.startup(opCtx);
const std::vector<BSONObj> oplog = {
BSONObj(), makeOplogEntry(1), BSONObj(), BSONObj(), makeOplogEntry(2), BSONObj(),
};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
- pushDocsFn(txn, &oplogBuffer, oplog);
+ pushDocsFn(opCtx, &oplogBuffer, oplog);
ASSERT_EQUALS(oplogBuffer.getCount(), 6UL);
- _assertDocumentsInCollectionEquals(txn, nss, oplog);
+ _assertDocumentsInCollectionEquals(opCtx, nss, oplog);
}
TEST_F(OplogBufferCollectionTest, PushPushesOnSentinelsProperly) {
auto nss = makeNamespace(_agent);
- _testPushSentinelsProperly(_txn.get(),
+ _testPushSentinelsProperly(_opCtx.get(),
nss,
_storageInterface,
- [](OperationContext* txn,
+ [](OperationContext* opCtx,
OplogBufferCollection* oplogBuffer,
const std::vector<BSONObj>& oplog) {
- oplogBuffer->push(txn, oplog[0]);
+ oplogBuffer->push(opCtx, oplog[0]);
ASSERT_EQUALS(1U, oplogBuffer->getSentinelCount_forTest());
- oplogBuffer->push(txn, oplog[1]);
+ oplogBuffer->push(opCtx, oplog[1]);
ASSERT_EQUALS(0U, oplogBuffer->getSentinelCount_forTest());
- oplogBuffer->push(txn, oplog[2]);
+ oplogBuffer->push(opCtx, oplog[2]);
ASSERT_EQUALS(1U, oplogBuffer->getSentinelCount_forTest());
- oplogBuffer->push(txn, oplog[3]);
+ oplogBuffer->push(opCtx, oplog[3]);
ASSERT_EQUALS(2U, oplogBuffer->getSentinelCount_forTest());
- oplogBuffer->push(txn, oplog[4]);
+ oplogBuffer->push(opCtx, oplog[4]);
ASSERT_EQUALS(0U, oplogBuffer->getSentinelCount_forTest());
- oplogBuffer->push(txn, oplog[5]);
+ oplogBuffer->push(opCtx, oplog[5]);
ASSERT_EQUALS(1U, oplogBuffer->getSentinelCount_forTest());
});
}
TEST_F(OplogBufferCollectionTest, PushEvenIfFullPushesOnSentinelsProperly) {
auto nss = makeNamespace(_agent);
- _testPushSentinelsProperly(_txn.get(),
+ _testPushSentinelsProperly(_opCtx.get(),
nss,
_storageInterface,
- [](OperationContext* txn,
+ [](OperationContext* opCtx,
OplogBufferCollection* oplogBuffer,
const std::vector<BSONObj>& oplog) {
- oplogBuffer->pushEvenIfFull(txn, oplog[0]);
+ oplogBuffer->pushEvenIfFull(opCtx, oplog[0]);
ASSERT_EQUALS(1U, oplogBuffer->getSentinelCount_forTest());
- oplogBuffer->pushEvenIfFull(txn, oplog[1]);
+ oplogBuffer->pushEvenIfFull(opCtx, oplog[1]);
ASSERT_EQUALS(0U, oplogBuffer->getSentinelCount_forTest());
- oplogBuffer->pushEvenIfFull(txn, oplog[2]);
+ oplogBuffer->pushEvenIfFull(opCtx, oplog[2]);
ASSERT_EQUALS(1U, oplogBuffer->getSentinelCount_forTest());
- oplogBuffer->pushEvenIfFull(txn, oplog[3]);
+ oplogBuffer->pushEvenIfFull(opCtx, oplog[3]);
ASSERT_EQUALS(2U, oplogBuffer->getSentinelCount_forTest());
- oplogBuffer->pushEvenIfFull(txn, oplog[4]);
+ oplogBuffer->pushEvenIfFull(opCtx, oplog[4]);
ASSERT_EQUALS(0U, oplogBuffer->getSentinelCount_forTest());
- oplogBuffer->pushEvenIfFull(txn, oplog[5]);
+ oplogBuffer->pushEvenIfFull(opCtx, oplog[5]);
ASSERT_EQUALS(1U, oplogBuffer->getSentinelCount_forTest());
});
}
TEST_F(OplogBufferCollectionTest, PushAllNonBlockingPushesOnSentinelsProperly) {
auto nss = makeNamespace(_agent);
- _testPushSentinelsProperly(_txn.get(),
+ _testPushSentinelsProperly(_opCtx.get(),
nss,
_storageInterface,
- [](OperationContext* txn,
+ [](OperationContext* opCtx,
OplogBufferCollection* oplogBuffer,
const std::vector<BSONObj>& oplog) {
oplogBuffer->pushAllNonBlocking(
- txn, oplog.cbegin(), oplog.cend());
+ opCtx, oplog.cbegin(), oplog.cend());
ASSERT_EQUALS(1U, oplogBuffer->getSentinelCount_forTest());
});
}
@@ -748,207 +748,207 @@ DEATH_TEST_F(
auto nss = makeNamespace(_agent);
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
+ oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {
makeOplogEntry(2), makeOplogEntry(1),
};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
- oplogBuffer.pushAllNonBlocking(_txn.get(), oplog.begin(), oplog.end());
+ oplogBuffer.pushAllNonBlocking(_opCtx.get(), oplog.begin(), oplog.end());
}
TEST_F(OplogBufferCollectionTest, SentinelInMiddleIsReturnedInOrder) {
auto nss = makeNamespace(_agent);
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
+ oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {
makeOplogEntry(1), makeOplogEntry(2), BSONObj(), makeOplogEntry(3),
};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
- oplogBuffer.pushEvenIfFull(_txn.get(), oplog[0]);
- oplogBuffer.pushEvenIfFull(_txn.get(), oplog[1]);
- oplogBuffer.pushEvenIfFull(_txn.get(), oplog[2]);
- oplogBuffer.pushEvenIfFull(_txn.get(), oplog[3]);
+ oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog[0]);
+ oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog[1]);
+ oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog[2]);
+ oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog[3]);
ASSERT_EQUALS(oplogBuffer.getCount(), 4UL);
- _assertDocumentsInCollectionEquals(_txn.get(), nss, oplog);
+ _assertDocumentsInCollectionEquals(_opCtx.get(), nss, oplog);
BSONObj doc;
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog[0]);
ASSERT_EQUALS(oplogBuffer.getCount(), 4UL);
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog[0]);
ASSERT_EQUALS(oplogBuffer.getCount(), 3UL);
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog[1]);
ASSERT_EQUALS(oplogBuffer.getCount(), 3UL);
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog[1]);
ASSERT_EQUALS(oplogBuffer.getCount(), 2UL);
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_TRUE(doc.isEmpty());
ASSERT_EQUALS(oplogBuffer.getCount(), 2UL);
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_TRUE(doc.isEmpty());
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog[3]);
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog[3]);
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
// tryPop does not remove documents from collection.
- _assertDocumentsInCollectionEquals(_txn.get(), nss, oplog);
+ _assertDocumentsInCollectionEquals(_opCtx.get(), nss, oplog);
}
TEST_F(OplogBufferCollectionTest, SentinelAtBeginningIsReturnedAtBeginning) {
auto nss = makeNamespace(_agent);
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
+ oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {BSONObj(), makeOplogEntry(1)};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
- oplogBuffer.pushEvenIfFull(_txn.get(), oplog[0]);
- oplogBuffer.pushEvenIfFull(_txn.get(), oplog[1]);
+ oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog[0]);
+ oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog[1]);
ASSERT_EQUALS(oplogBuffer.getCount(), 2UL);
- _assertDocumentsInCollectionEquals(_txn.get(), nss, oplog);
+ _assertDocumentsInCollectionEquals(_opCtx.get(), nss, oplog);
BSONObj doc;
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_TRUE(doc.isEmpty());
ASSERT_EQUALS(oplogBuffer.getCount(), 2UL);
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_TRUE(doc.isEmpty());
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog[1]);
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog[1]);
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
// tryPop does not remove documents from collection.
- _assertDocumentsInCollectionEquals(_txn.get(), nss, oplog);
+ _assertDocumentsInCollectionEquals(_opCtx.get(), nss, oplog);
}
TEST_F(OplogBufferCollectionTest, SentinelAtEndIsReturnedAtEnd) {
auto nss = makeNamespace(_agent);
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
+ oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {makeOplogEntry(1), BSONObj()};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
- oplogBuffer.pushEvenIfFull(_txn.get(), oplog[0]);
- oplogBuffer.pushEvenIfFull(_txn.get(), oplog[1]);
+ oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog[0]);
+ oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog[1]);
ASSERT_EQUALS(oplogBuffer.getCount(), 2UL);
- _assertDocumentsInCollectionEquals(_txn.get(), nss, oplog);
+ _assertDocumentsInCollectionEquals(_opCtx.get(), nss, oplog);
BSONObj doc;
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog[0]);
ASSERT_EQUALS(oplogBuffer.getCount(), 2UL);
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog[0]);
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_TRUE(doc.isEmpty());
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_TRUE(doc.isEmpty());
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
// tryPop does not remove documents from collection.
- _assertDocumentsInCollectionEquals(_txn.get(), nss, oplog);
+ _assertDocumentsInCollectionEquals(_opCtx.get(), nss, oplog);
}
TEST_F(OplogBufferCollectionTest, MultipleSentinelsAreReturnedInOrder) {
auto nss = makeNamespace(_agent);
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
+ oplogBuffer.startup(_opCtx.get());
const std::vector<BSONObj> oplog = {
BSONObj(), makeOplogEntry(1), BSONObj(), BSONObj(), makeOplogEntry(2), BSONObj(),
};
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
- oplogBuffer.pushAllNonBlocking(_txn.get(), oplog.cbegin(), oplog.cend());
+ oplogBuffer.pushAllNonBlocking(_opCtx.get(), oplog.cbegin(), oplog.cend());
ASSERT_EQUALS(oplogBuffer.getCount(), 6UL);
- _assertDocumentsInCollectionEquals(_txn.get(), nss, oplog);
+ _assertDocumentsInCollectionEquals(_opCtx.get(), nss, oplog);
BSONObj doc;
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_TRUE(doc.isEmpty());
ASSERT_EQUALS(oplogBuffer.getCount(), 6UL);
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_TRUE(doc.isEmpty());
ASSERT_EQUALS(oplogBuffer.getCount(), 5UL);
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog[1]);
ASSERT_EQUALS(oplogBuffer.getCount(), 5UL);
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog[1]);
ASSERT_EQUALS(oplogBuffer.getCount(), 4UL);
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_TRUE(doc.isEmpty());
ASSERT_EQUALS(oplogBuffer.getCount(), 4UL);
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_TRUE(doc.isEmpty());
ASSERT_EQUALS(oplogBuffer.getCount(), 3UL);
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_TRUE(doc.isEmpty());
ASSERT_EQUALS(oplogBuffer.getCount(), 3UL);
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_TRUE(doc.isEmpty());
ASSERT_EQUALS(oplogBuffer.getCount(), 2UL);
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog[4]);
ASSERT_EQUALS(oplogBuffer.getCount(), 2UL);
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(doc, oplog[4]);
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_TRUE(doc.isEmpty());
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_TRUE(doc.isEmpty());
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
// tryPop does not remove documents from collection.
- _assertDocumentsInCollectionEquals(_txn.get(), nss, oplog);
+ _assertDocumentsInCollectionEquals(_opCtx.get(), nss, oplog);
}
TEST_F(OplogBufferCollectionTest, WaitForDataBlocksAndFindsSentinel) {
auto nss = makeNamespace(_agent);
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
+ oplogBuffer.startup(_opCtx.get());
unittest::Barrier barrier(2U);
BSONObj oplog;
@@ -965,11 +965,11 @@ TEST_F(OplogBufferCollectionTest, WaitForDataBlocksAndFindsSentinel) {
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
barrier.countDownAndWait();
- oplogBuffer.pushEvenIfFull(_txn.get(), oplog);
+ oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog);
peekingThread.join();
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
ASSERT_TRUE(success);
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_TRUE(doc.isEmpty());
ASSERT_EQUALS(count, 1UL);
}
@@ -977,7 +977,7 @@ TEST_F(OplogBufferCollectionTest, WaitForDataBlocksAndFindsSentinel) {
TEST_F(OplogBufferCollectionTest, TwoWaitForDataInvocationsBlockAndFindSameSentinel) {
auto nss = makeNamespace(_agent);
OplogBufferCollection oplogBuffer(_storageInterface, nss);
- oplogBuffer.startup(_txn.get());
+ oplogBuffer.startup(_opCtx.get());
unittest::Barrier barrier(3U);
BSONObj oplog;
@@ -1003,13 +1003,13 @@ TEST_F(OplogBufferCollectionTest, TwoWaitForDataInvocationsBlockAndFindSameSenti
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
barrier.countDownAndWait();
- oplogBuffer.pushEvenIfFull(_txn.get(), oplog);
+ oplogBuffer.pushEvenIfFull(_opCtx.get(), oplog);
peekingThread1.join();
peekingThread2.join();
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
ASSERT_TRUE(success1);
BSONObj doc;
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_TRUE(doc.isEmpty());
ASSERT_EQUALS(count1, 1UL);
ASSERT_TRUE(success2);
@@ -1041,74 +1041,74 @@ TEST_F(OplogBufferCollectionTest, PeekFillsCacheWithDocumentsFromCollection) {
std::size_t peekCacheSize = 3U;
OplogBufferCollection oplogBuffer(_storageInterface, nss, _makeOptions(3));
ASSERT_EQUALS(peekCacheSize, oplogBuffer.getOptions().peekCacheSize);
- oplogBuffer.startup(_txn.get());
+ oplogBuffer.startup(_opCtx.get());
std::vector<BSONObj> oplog;
for (int i = 0; i < 5; ++i) {
oplog.push_back(makeOplogEntry(i + 1));
};
- oplogBuffer.pushAllNonBlocking(_txn.get(), oplog.cbegin(), oplog.cend());
- _assertDocumentsInCollectionEquals(_txn.get(), nss, oplog);
+ oplogBuffer.pushAllNonBlocking(_opCtx.get(), oplog.cbegin(), oplog.cend());
+ _assertDocumentsInCollectionEquals(_opCtx.get(), nss, oplog);
// Before any peek operations, peek cache should be empty.
_assertDocumentsEqualCache({}, oplogBuffer.getPeekCache_forTest());
// First peek operation should trigger a read of 'peekCacheSize' documents from the collection.
BSONObj doc;
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(oplog[0], doc);
_assertDocumentsEqualCache({oplog[0], oplog[1], oplog[2]}, oplogBuffer.getPeekCache_forTest());
// Repeated peek operation should not modify the cache.
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(oplog[0], doc);
_assertDocumentsEqualCache({oplog[0], oplog[1], oplog[2]}, oplogBuffer.getPeekCache_forTest());
// Pop operation should remove the first element in the cache
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(oplog[0], doc);
_assertDocumentsEqualCache({oplog[1], oplog[2]}, oplogBuffer.getPeekCache_forTest());
// Next peek operation should not modify the cache.
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(oplog[1], doc);
_assertDocumentsEqualCache({oplog[1], oplog[2]}, oplogBuffer.getPeekCache_forTest());
// Pop the rest of the items in the cache.
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(oplog[1], doc);
_assertDocumentsEqualCache({oplog[2]}, oplogBuffer.getPeekCache_forTest());
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(oplog[2], doc);
_assertDocumentsEqualCache({}, oplogBuffer.getPeekCache_forTest());
// Next peek operation should replenish the cache.
// Cache size will be less than the configured 'peekCacheSize' because
// there will not be enough documents left unread in the collection.
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(oplog[3], doc);
_assertDocumentsEqualCache({oplog[3], oplog[4]}, oplogBuffer.getPeekCache_forTest());
// Pop the remaining documents from the buffer.
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(oplog[3], doc);
_assertDocumentsEqualCache({oplog[4]}, oplogBuffer.getPeekCache_forTest());
// Verify state of cache between pops using peek.
- ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.peek(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(oplog[4], doc);
_assertDocumentsEqualCache({oplog[4]}, oplogBuffer.getPeekCache_forTest());
- ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_TRUE(oplogBuffer.tryPop(_opCtx.get(), &doc));
ASSERT_BSONOBJ_EQ(oplog[4], doc);
_assertDocumentsEqualCache({}, oplogBuffer.getPeekCache_forTest());
// Nothing left in the collection.
- ASSERT_FALSE(oplogBuffer.peek(_txn.get(), &doc));
+ ASSERT_FALSE(oplogBuffer.peek(_opCtx.get(), &doc));
_assertDocumentsEqualCache({}, oplogBuffer.getPeekCache_forTest());
- ASSERT_FALSE(oplogBuffer.tryPop(_txn.get(), &doc));
+ ASSERT_FALSE(oplogBuffer.tryPop(_opCtx.get(), &doc));
_assertDocumentsEqualCache({}, oplogBuffer.getPeekCache_forTest());
}
diff --git a/src/mongo/db/repl/oplog_buffer_proxy.cpp b/src/mongo/db/repl/oplog_buffer_proxy.cpp
index e2080a94f19..675bb4d6186 100644
--- a/src/mongo/db/repl/oplog_buffer_proxy.cpp
+++ b/src/mongo/db/repl/oplog_buffer_proxy.cpp
@@ -44,33 +44,33 @@ OplogBuffer* OplogBufferProxy::getTarget() const {
return _target.get();
}
-void OplogBufferProxy::startup(OperationContext* txn) {
- _target->startup(txn);
+void OplogBufferProxy::startup(OperationContext* opCtx) {
+ _target->startup(opCtx);
}
-void OplogBufferProxy::shutdown(OperationContext* txn) {
+void OplogBufferProxy::shutdown(OperationContext* opCtx) {
{
stdx::lock_guard<stdx::mutex> backLock(_lastPushedMutex);
stdx::lock_guard<stdx::mutex> frontLock(_lastPeekedMutex);
_lastPushed.reset();
_lastPeeked.reset();
}
- _target->shutdown(txn);
+ _target->shutdown(opCtx);
}
-void OplogBufferProxy::pushEvenIfFull(OperationContext* txn, const Value& value) {
+void OplogBufferProxy::pushEvenIfFull(OperationContext* opCtx, const Value& value) {
stdx::lock_guard<stdx::mutex> lk(_lastPushedMutex);
_lastPushed = value;
- _target->pushEvenIfFull(txn, value);
+ _target->pushEvenIfFull(opCtx, value);
}
-void OplogBufferProxy::push(OperationContext* txn, const Value& value) {
+void OplogBufferProxy::push(OperationContext* opCtx, const Value& value) {
stdx::lock_guard<stdx::mutex> lk(_lastPushedMutex);
_lastPushed = value;
- _target->push(txn, value);
+ _target->push(opCtx, value);
}
-void OplogBufferProxy::pushAllNonBlocking(OperationContext* txn,
+void OplogBufferProxy::pushAllNonBlocking(OperationContext* opCtx,
Batch::const_iterator begin,
Batch::const_iterator end) {
if (begin == end) {
@@ -78,11 +78,11 @@ void OplogBufferProxy::pushAllNonBlocking(OperationContext* txn,
}
stdx::lock_guard<stdx::mutex> lk(_lastPushedMutex);
_lastPushed = *(end - 1);
- _target->pushAllNonBlocking(txn, begin, end);
+ _target->pushAllNonBlocking(opCtx, begin, end);
}
-void OplogBufferProxy::waitForSpace(OperationContext* txn, std::size_t size) {
- _target->waitForSpace(txn, size);
+void OplogBufferProxy::waitForSpace(OperationContext* opCtx, std::size_t size) {
+ _target->waitForSpace(opCtx, size);
}
bool OplogBufferProxy::isEmpty() const {
@@ -101,18 +101,18 @@ std::size_t OplogBufferProxy::getCount() const {
return _target->getCount();
}
-void OplogBufferProxy::clear(OperationContext* txn) {
+void OplogBufferProxy::clear(OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> backLock(_lastPushedMutex);
stdx::lock_guard<stdx::mutex> frontLock(_lastPeekedMutex);
_lastPushed.reset();
_lastPeeked.reset();
- _target->clear(txn);
+ _target->clear(opCtx);
}
-bool OplogBufferProxy::tryPop(OperationContext* txn, Value* value) {
+bool OplogBufferProxy::tryPop(OperationContext* opCtx, Value* value) {
stdx::lock_guard<stdx::mutex> backLock(_lastPushedMutex);
stdx::lock_guard<stdx::mutex> frontLock(_lastPeekedMutex);
- if (!_target->tryPop(txn, value)) {
+ if (!_target->tryPop(opCtx, value)) {
return false;
}
_lastPeeked.reset();
@@ -133,13 +133,13 @@ bool OplogBufferProxy::waitForData(Seconds waitDuration) {
return _target->waitForData(waitDuration);
}
-bool OplogBufferProxy::peek(OperationContext* txn, Value* value) {
+bool OplogBufferProxy::peek(OperationContext* opCtx, Value* value) {
stdx::lock_guard<stdx::mutex> lk(_lastPeekedMutex);
if (_lastPeeked) {
*value = *_lastPeeked;
return true;
}
- if (_target->peek(txn, value)) {
+ if (_target->peek(opCtx, value)) {
_lastPeeked = *value;
return true;
}
@@ -147,7 +147,7 @@ bool OplogBufferProxy::peek(OperationContext* txn, Value* value) {
}
boost::optional<OplogBuffer::Value> OplogBufferProxy::lastObjectPushed(
- OperationContext* txn) const {
+ OperationContext* opCtx) const {
stdx::lock_guard<stdx::mutex> lk(_lastPushedMutex);
if (!_lastPushed) {
return boost::none;
diff --git a/src/mongo/db/repl/oplog_buffer_proxy.h b/src/mongo/db/repl/oplog_buffer_proxy.h
index 2624a9bb68f..ae9be1340ff 100644
--- a/src/mongo/db/repl/oplog_buffer_proxy.h
+++ b/src/mongo/db/repl/oplog_buffer_proxy.h
@@ -55,23 +55,23 @@ public:
*/
OplogBuffer* getTarget() const;
- void startup(OperationContext* txn) override;
- void shutdown(OperationContext* txn) override;
- void pushEvenIfFull(OperationContext* txn, const Value& value) override;
- void push(OperationContext* txn, const Value& value) override;
- void pushAllNonBlocking(OperationContext* txn,
+ void startup(OperationContext* opCtx) override;
+ void shutdown(OperationContext* opCtx) override;
+ void pushEvenIfFull(OperationContext* opCtx, const Value& value) override;
+ void push(OperationContext* opCtx, const Value& value) override;
+ void pushAllNonBlocking(OperationContext* opCtx,
Batch::const_iterator begin,
Batch::const_iterator end) override;
- void waitForSpace(OperationContext* txn, std::size_t size) override;
+ void waitForSpace(OperationContext* opCtx, std::size_t size) override;
bool isEmpty() const override;
std::size_t getMaxSize() const override;
std::size_t getSize() const override;
std::size_t getCount() const override;
- void clear(OperationContext* txn) override;
- bool tryPop(OperationContext* txn, Value* value) override;
+ void clear(OperationContext* opCtx) override;
+ bool tryPop(OperationContext* opCtx, Value* value) override;
bool waitForData(Seconds waitDuration) override;
- bool peek(OperationContext* txn, Value* value) override;
- boost::optional<Value> lastObjectPushed(OperationContext* txn) const override;
+ bool peek(OperationContext* opCtx, Value* value) override;
+ boost::optional<Value> lastObjectPushed(OperationContext* opCtx) const override;
// ---- Testing API ----
boost::optional<Value> getLastPeeked_forTest() const;
diff --git a/src/mongo/db/repl/oplog_buffer_proxy_test.cpp b/src/mongo/db/repl/oplog_buffer_proxy_test.cpp
index a59a66f0979..a372eea8c85 100644
--- a/src/mongo/db/repl/oplog_buffer_proxy_test.cpp
+++ b/src/mongo/db/repl/oplog_buffer_proxy_test.cpp
@@ -57,17 +57,17 @@ public:
void shutdown(OperationContext*) override {
shutdownCalled = true;
}
- void pushEvenIfFull(OperationContext* txn, const Value& value) override {
- push(txn, value);
+ void pushEvenIfFull(OperationContext* opCtx, const Value& value) override {
+ push(opCtx, value);
}
void push(OperationContext*, const Value& value) override {
values.push_back(value);
}
- void pushAllNonBlocking(OperationContext* txn,
+ void pushAllNonBlocking(OperationContext* opCtx,
Batch::const_iterator begin,
Batch::const_iterator end) override {
for (auto i = begin; i != end; ++i) {
- push(txn, *i);
+ push(opCtx, *i);
}
}
void waitForSpace(OperationContext*, std::size_t) override {
@@ -92,9 +92,9 @@ public:
void clear(OperationContext*) override {
values.clear();
}
- bool tryPop(OperationContext* txn, Value* value) override {
+ bool tryPop(OperationContext* opCtx, Value* value) override {
tryPopCalled = true;
- if (!peek(txn, value)) {
+ if (!peek(opCtx, value)) {
return false;
}
values.pop_front();
@@ -140,7 +140,7 @@ private:
protected:
OplogBufferMock* _mock = nullptr;
std::unique_ptr<OplogBufferProxy> _proxy;
- OperationContext* _txn = nullptr; // Not dereferenced.
+ OperationContext* _opCtx = nullptr; // Not dereferenced.
};
void OplogBufferProxyTest::setUp() {
@@ -165,29 +165,29 @@ TEST_F(OplogBufferProxyTest, GetTarget) {
}
TEST_F(OplogBufferProxyTest, Startup) {
- _proxy->startup(_txn);
+ _proxy->startup(_opCtx);
ASSERT_TRUE(_mock->startupCalled);
}
TEST_F(OplogBufferProxyTest, ShutdownResetsCachedValues) {
auto pushValue = BSON("x" << 1);
- _proxy->push(_txn, pushValue);
+ _proxy->push(_opCtx, pushValue);
OplogBuffer::Value peekValue;
- ASSERT_TRUE(_proxy->peek(_txn, &peekValue));
+ ASSERT_TRUE(_proxy->peek(_opCtx, &peekValue));
ASSERT_BSONOBJ_EQ(pushValue, peekValue);
- ASSERT_NOT_EQUALS(boost::none, _proxy->lastObjectPushed(_txn));
+ ASSERT_NOT_EQUALS(boost::none, _proxy->lastObjectPushed(_opCtx));
ASSERT_NOT_EQUALS(boost::none, _proxy->getLastPeeked_forTest());
- _proxy->shutdown(_txn);
+ _proxy->shutdown(_opCtx);
ASSERT_TRUE(_mock->shutdownCalled);
- ASSERT_EQUALS(boost::none, _proxy->lastObjectPushed(_txn));
+ ASSERT_EQUALS(boost::none, _proxy->lastObjectPushed(_opCtx));
ASSERT_EQUALS(boost::none, _proxy->getLastPeeked_forTest());
}
TEST_F(OplogBufferProxyTest, WaitForSpace) {
- _proxy->waitForSpace(_txn, 100U);
+ _proxy->waitForSpace(_opCtx, 100U);
ASSERT_TRUE(_mock->waitForSpaceCalled);
}
@@ -199,7 +199,7 @@ TEST_F(OplogBufferProxyTest, MaxSize) {
TEST_F(OplogBufferProxyTest, EmptySizeAndCount) {
ASSERT_TRUE(_proxy->isEmpty());
OplogBuffer::Batch values = {BSON("x" << 1), BSON("x" << 2)};
- _proxy->pushAllNonBlocking(_txn, values.cbegin(), values.cend());
+ _proxy->pushAllNonBlocking(_opCtx, values.cbegin(), values.cend());
ASSERT_FALSE(_proxy->isEmpty());
ASSERT_EQUALS(values.size(), _mock->getCount());
ASSERT_EQUALS(_mock->getCount(), _proxy->getCount());
@@ -209,79 +209,79 @@ TEST_F(OplogBufferProxyTest, EmptySizeAndCount) {
TEST_F(OplogBufferProxyTest, ClearResetsCachedValues) {
OplogBuffer::Batch values = {BSON("x" << 1), BSON("x" << 2)};
- _proxy->pushAllNonBlocking(_txn, values.cbegin(), values.cend());
+ _proxy->pushAllNonBlocking(_opCtx, values.cbegin(), values.cend());
ASSERT_FALSE(_mock->isEmpty());
- auto lastObjPushed = _proxy->lastObjectPushed(_txn);
+ auto lastObjPushed = _proxy->lastObjectPushed(_opCtx);
ASSERT_NOT_EQUALS(boost::none, lastObjPushed);
ASSERT_BSONOBJ_EQ(values.back(), *lastObjPushed);
ASSERT_FALSE(_mock->lastObjectPushedCalled);
OplogBuffer::Value peekValue;
- ASSERT_TRUE(_proxy->peek(_txn, &peekValue));
+ ASSERT_TRUE(_proxy->peek(_opCtx, &peekValue));
ASSERT_NOT_EQUALS(boost::none, _proxy->getLastPeeked_forTest());
- _proxy->clear(_txn);
+ _proxy->clear(_opCtx);
ASSERT_TRUE(_mock->isEmpty());
- ASSERT_EQUALS(boost::none, _proxy->lastObjectPushed(_txn));
+ ASSERT_EQUALS(boost::none, _proxy->lastObjectPushed(_opCtx));
ASSERT_EQUALS(boost::none, _proxy->getLastPeeked_forTest());
}
void _testPushFunctionUpdatesCachedLastObjectPushed(
- OperationContext* txn,
+ OperationContext* opCtx,
OplogBuffer* proxy,
OplogBufferMock* mock,
stdx::function<std::size_t(
- OperationContext* txn, OplogBuffer* proxy, const OplogBuffer::Value& value)> pushFn) {
- ASSERT_EQUALS(proxy->lastObjectPushed(txn), boost::none);
+ OperationContext* opCtx, OplogBuffer* proxy, const OplogBuffer::Value& value)> pushFn) {
+ ASSERT_EQUALS(proxy->lastObjectPushed(opCtx), boost::none);
ASSERT_FALSE(mock->lastObjectPushedCalled);
auto val = BSON("x" << 1);
- auto numPushed = pushFn(txn, proxy, val);
+ auto numPushed = pushFn(opCtx, proxy, val);
ASSERT_EQUALS(numPushed, mock->values.size());
ASSERT_BSONOBJ_EQ(val, mock->values.back());
- auto lastObjPushed = proxy->lastObjectPushed(txn);
+ auto lastObjPushed = proxy->lastObjectPushed(opCtx);
ASSERT_NOT_EQUALS(boost::none, lastObjPushed);
ASSERT_BSONOBJ_EQ(val, *lastObjPushed);
ASSERT_FALSE(mock->lastObjectPushedCalled);
}
TEST_F(OplogBufferProxyTest, PushEvenIfFullUpdatesCachedLastObjectPushed) {
- auto pushFn = [](OperationContext* txn, OplogBuffer* proxy, const OplogBuffer::Value& value) {
- proxy->pushEvenIfFull(txn, value);
+ auto pushFn = [](OperationContext* opCtx, OplogBuffer* proxy, const OplogBuffer::Value& value) {
+ proxy->pushEvenIfFull(opCtx, value);
return 1U;
};
- _testPushFunctionUpdatesCachedLastObjectPushed(_txn, _proxy.get(), _mock, pushFn);
+ _testPushFunctionUpdatesCachedLastObjectPushed(_opCtx, _proxy.get(), _mock, pushFn);
}
TEST_F(OplogBufferProxyTest, PushUpdatesCachedLastObjectPushed) {
- auto pushFn = [](OperationContext* txn, OplogBuffer* proxy, const OplogBuffer::Value& value) {
- proxy->push(txn, value);
+ auto pushFn = [](OperationContext* opCtx, OplogBuffer* proxy, const OplogBuffer::Value& value) {
+ proxy->push(opCtx, value);
return 1U;
};
- _testPushFunctionUpdatesCachedLastObjectPushed(_txn, _proxy.get(), _mock, pushFn);
+ _testPushFunctionUpdatesCachedLastObjectPushed(_opCtx, _proxy.get(), _mock, pushFn);
}
TEST_F(OplogBufferProxyTest, PushAllNonBlockingUpdatesCachedLastObjectPushed) {
- auto pushFn = [](OperationContext* txn, OplogBuffer* proxy, const OplogBuffer::Value& value) {
+ auto pushFn = [](OperationContext* opCtx, OplogBuffer* proxy, const OplogBuffer::Value& value) {
OplogBuffer::Batch values = {BSON("x" << 2), value};
- proxy->pushAllNonBlocking(txn, values.cbegin(), values.cend());
+ proxy->pushAllNonBlocking(opCtx, values.cbegin(), values.cend());
return values.size();
};
- _testPushFunctionUpdatesCachedLastObjectPushed(_txn, _proxy.get(), _mock, pushFn);
+ _testPushFunctionUpdatesCachedLastObjectPushed(_opCtx, _proxy.get(), _mock, pushFn);
}
TEST_F(OplogBufferProxyTest, PushAllNonBlockingDoesNotUpdateCachedLastObjectPushedOnEmptyBatch) {
OplogBuffer::Batch values;
- _proxy->pushAllNonBlocking(_txn, values.cbegin(), values.cend());
+ _proxy->pushAllNonBlocking(_opCtx, values.cbegin(), values.cend());
ASSERT_EQUALS(values.size(), _mock->values.size());
- ASSERT_EQUALS(boost::none, _proxy->lastObjectPushed(_txn));
+ ASSERT_EQUALS(boost::none, _proxy->lastObjectPushed(_opCtx));
ASSERT_FALSE(_mock->lastObjectPushedCalled);
}
TEST_F(OplogBufferProxyTest, WaitForDataReturnsTrueImmediatelyIfLastObjectPushedIsCached) {
- _proxy->pushEvenIfFull(_txn, BSON("x" << 1));
+ _proxy->pushEvenIfFull(_opCtx, BSON("x" << 1));
ASSERT_TRUE(_proxy->waitForData(Seconds(10)));
ASSERT_FALSE(_mock->waitForDataCalled);
}
@@ -293,15 +293,15 @@ TEST_F(OplogBufferProxyTest, WaitForDataForwardsCallToTargetIfLastObjectPushedIs
TEST_F(OplogBufferProxyTest, TryPopResetsLastPushedObjectIfBufferIsEmpty) {
auto pushValue = BSON("x" << 1);
- _proxy->push(_txn, BSON("x" << 1));
- auto lastPushed = _proxy->lastObjectPushed(_txn);
- ASSERT_NOT_EQUALS(boost::none, _proxy->lastObjectPushed(_txn));
+ _proxy->push(_opCtx, BSON("x" << 1));
+ auto lastPushed = _proxy->lastObjectPushed(_opCtx);
+ ASSERT_NOT_EQUALS(boost::none, _proxy->lastObjectPushed(_opCtx));
ASSERT_BSONOBJ_EQ(pushValue, *lastPushed);
OplogBuffer::Value poppedValue;
- ASSERT_TRUE(_proxy->tryPop(_txn, &poppedValue));
+ ASSERT_TRUE(_proxy->tryPop(_opCtx, &poppedValue));
ASSERT_BSONOBJ_EQ(pushValue, poppedValue);
- ASSERT_EQUALS(boost::none, _proxy->lastObjectPushed(_txn));
+ ASSERT_EQUALS(boost::none, _proxy->lastObjectPushed(_opCtx));
// waitForData should forward call to underlying buffer.
ASSERT_FALSE(_proxy->waitForData(Seconds(10)));
@@ -311,41 +311,41 @@ TEST_F(OplogBufferProxyTest, TryPopResetsLastPushedObjectIfBufferIsEmpty) {
TEST_F(OplogBufferProxyTest, PeekCachesFrontOfBuffer) {
OplogBuffer::Value peekValue;
ASSERT_FALSE(_mock->peekCalled);
- ASSERT_FALSE(_proxy->peek(_txn, &peekValue));
+ ASSERT_FALSE(_proxy->peek(_opCtx, &peekValue));
ASSERT_TRUE(_mock->peekCalled);
ASSERT_TRUE(peekValue.isEmpty());
_mock->peekCalled = false;
OplogBuffer::Batch values = {BSON("x" << 1), BSON("x" << 2)};
- _proxy->pushAllNonBlocking(_txn, values.cbegin(), values.cend());
+ _proxy->pushAllNonBlocking(_opCtx, values.cbegin(), values.cend());
ASSERT_EQUALS(values.size(), _mock->values.size());
- ASSERT_TRUE(_proxy->peek(_txn, &peekValue));
+ ASSERT_TRUE(_proxy->peek(_opCtx, &peekValue));
ASSERT_TRUE(_mock->peekCalled);
ASSERT_BSONOBJ_EQ(values.front(), peekValue);
_mock->peekCalled = false;
peekValue = OplogBuffer::Value();
- ASSERT_TRUE(_proxy->peek(_txn, &peekValue));
+ ASSERT_TRUE(_proxy->peek(_opCtx, &peekValue));
ASSERT_FALSE(_mock->peekCalled);
ASSERT_BSONOBJ_EQ(values.front(), peekValue);
}
TEST_F(OplogBufferProxyTest, TryPopClearsCachedFrontValue) {
OplogBuffer::Batch values = {BSON("x" << 1), BSON("x" << 2)};
- _proxy->pushAllNonBlocking(_txn, values.cbegin(), values.cend());
+ _proxy->pushAllNonBlocking(_opCtx, values.cbegin(), values.cend());
ASSERT_EQUALS(values.size(), _mock->values.size());
// Peek and pop first value {x: 1}.
OplogBuffer::Value peekValue;
- ASSERT_TRUE(_proxy->peek(_txn, &peekValue));
+ ASSERT_TRUE(_proxy->peek(_opCtx, &peekValue));
ASSERT_TRUE(_mock->peekCalled);
ASSERT_BSONOBJ_EQ(values.front(), peekValue);
_mock->peekCalled = false;
peekValue = OplogBuffer::Value();
OplogBuffer::Value poppedValue;
- ASSERT_TRUE(_proxy->tryPop(_txn, &poppedValue));
+ ASSERT_TRUE(_proxy->tryPop(_opCtx, &poppedValue));
ASSERT_TRUE(_mock->tryPopCalled);
ASSERT_BSONOBJ_EQ(values.front(), poppedValue);
ASSERT_EQUALS(boost::none, _proxy->getLastPeeked_forTest());
@@ -353,14 +353,14 @@ TEST_F(OplogBufferProxyTest, TryPopClearsCachedFrontValue) {
poppedValue = OplogBuffer::Value();
// Peek and pop second value {x: 2}.
- ASSERT_TRUE(_proxy->peek(_txn, &peekValue));
+ ASSERT_TRUE(_proxy->peek(_opCtx, &peekValue));
ASSERT_TRUE(_mock->peekCalled);
ASSERT_BSONOBJ_EQ(values.back(), peekValue);
ASSERT_NOT_EQUALS(boost::none, _proxy->getLastPeeked_forTest());
_mock->peekCalled = false;
peekValue = OplogBuffer::Value();
- ASSERT_TRUE(_proxy->tryPop(_txn, &poppedValue));
+ ASSERT_TRUE(_proxy->tryPop(_opCtx, &poppedValue));
ASSERT_TRUE(_mock->tryPopCalled);
ASSERT_BSONOBJ_EQ(values.back(), poppedValue);
ASSERT_EQUALS(boost::none, _proxy->getLastPeeked_forTest());
@@ -368,12 +368,12 @@ TEST_F(OplogBufferProxyTest, TryPopClearsCachedFrontValue) {
poppedValue = OplogBuffer::Value();
// Peek and pop empty buffer.
- ASSERT_FALSE(_proxy->peek(_txn, &peekValue));
+ ASSERT_FALSE(_proxy->peek(_opCtx, &peekValue));
ASSERT_TRUE(_mock->peekCalled);
ASSERT_TRUE(peekValue.isEmpty());
ASSERT_EQUALS(boost::none, _proxy->getLastPeeked_forTest());
- ASSERT_FALSE(_proxy->tryPop(_txn, &poppedValue));
+ ASSERT_FALSE(_proxy->tryPop(_opCtx, &poppedValue));
ASSERT_TRUE(_mock->tryPopCalled);
ASSERT_TRUE(poppedValue.isEmpty());
ASSERT_EQUALS(boost::none, _proxy->getLastPeeked_forTest());
diff --git a/src/mongo/db/repl/oplog_interface_local.cpp b/src/mongo/db/repl/oplog_interface_local.cpp
index 03b93892885..a68fd8db0d6 100644
--- a/src/mongo/db/repl/oplog_interface_local.cpp
+++ b/src/mongo/db/repl/oplog_interface_local.cpp
@@ -43,7 +43,7 @@ namespace {
class OplogIteratorLocal : public OplogInterface::Iterator {
public:
- OplogIteratorLocal(OperationContext* txn, const std::string& collectionName);
+ OplogIteratorLocal(OperationContext* opCtx, const std::string& collectionName);
StatusWith<Value> next() override;
@@ -55,12 +55,12 @@ private:
std::unique_ptr<PlanExecutor> _exec;
};
-OplogIteratorLocal::OplogIteratorLocal(OperationContext* txn, const std::string& collectionName)
- : _transaction(txn, MODE_IS),
- _dbLock(txn->lockState(), nsToDatabase(collectionName), MODE_IS),
- _collectionLock(txn->lockState(), collectionName, MODE_S),
- _ctx(txn, collectionName),
- _exec(InternalPlanner::collectionScan(txn,
+OplogIteratorLocal::OplogIteratorLocal(OperationContext* opCtx, const std::string& collectionName)
+ : _transaction(opCtx, MODE_IS),
+ _dbLock(opCtx->lockState(), nsToDatabase(collectionName), MODE_IS),
+ _collectionLock(opCtx->lockState(), collectionName, MODE_S),
+ _ctx(opCtx, collectionName),
+ _exec(InternalPlanner::collectionScan(opCtx,
collectionName,
_ctx.db()->getCollection(collectionName),
PlanExecutor::YIELD_MANUAL,
@@ -84,20 +84,21 @@ StatusWith<OplogInterface::Iterator::Value> OplogIteratorLocal::next() {
} // namespace
-OplogInterfaceLocal::OplogInterfaceLocal(OperationContext* txn, const std::string& collectionName)
- : _txn(txn), _collectionName(collectionName) {
- invariant(txn);
+OplogInterfaceLocal::OplogInterfaceLocal(OperationContext* opCtx, const std::string& collectionName)
+ : _opCtx(opCtx), _collectionName(collectionName) {
+ invariant(opCtx);
invariant(!collectionName.empty());
}
std::string OplogInterfaceLocal::toString() const {
return str::stream() << "LocalOplogInterface: "
"operation context: "
- << _txn->getOpID() << "; collection: " << _collectionName;
+ << _opCtx->getOpID() << "; collection: " << _collectionName;
}
std::unique_ptr<OplogInterface::Iterator> OplogInterfaceLocal::makeIterator() const {
- return std::unique_ptr<OplogInterface::Iterator>(new OplogIteratorLocal(_txn, _collectionName));
+ return std::unique_ptr<OplogInterface::Iterator>(
+ new OplogIteratorLocal(_opCtx, _collectionName));
}
} // namespace repl
diff --git a/src/mongo/db/repl/oplog_interface_local.h b/src/mongo/db/repl/oplog_interface_local.h
index 32c9adc4377..577dbe8d7fa 100644
--- a/src/mongo/db/repl/oplog_interface_local.h
+++ b/src/mongo/db/repl/oplog_interface_local.h
@@ -42,12 +42,12 @@ namespace repl {
class OplogInterfaceLocal : public OplogInterface {
public:
- OplogInterfaceLocal(OperationContext* txn, const std::string& collectionName);
+ OplogInterfaceLocal(OperationContext* opCtx, const std::string& collectionName);
std::string toString() const override;
std::unique_ptr<OplogInterface::Iterator> makeIterator() const override;
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
std::string _collectionName;
};
diff --git a/src/mongo/db/repl/oplogreader.cpp b/src/mongo/db/repl/oplogreader.cpp
index 92bc6648b8b..665554c25b2 100644
--- a/src/mongo/db/repl/oplogreader.cpp
+++ b/src/mongo/db/repl/oplogreader.cpp
@@ -152,7 +152,7 @@ Status OplogReader::_compareRequiredOpTimeWithQueryResponse(const OpTime& requir
return Status::OK();
}
-void OplogReader::connectToSyncSource(OperationContext* txn,
+void OplogReader::connectToSyncSource(OperationContext* opCtx,
const OpTime& lastOpTimeFetched,
const OpTime& requiredOpTime,
ReplicationCoordinator* replCoord) {
diff --git a/src/mongo/db/repl/oplogreader.h b/src/mongo/db/repl/oplogreader.h
index 1434125697a..641315b748f 100644
--- a/src/mongo/db/repl/oplogreader.h
+++ b/src/mongo/db/repl/oplogreader.h
@@ -155,7 +155,7 @@ public:
* sync source blacklist.
* This function may throw DB exceptions.
*/
- void connectToSyncSource(OperationContext* txn,
+ void connectToSyncSource(OperationContext* opCtx,
const OpTime& lastOpTimeFetched,
const OpTime& requiredOpTime,
ReplicationCoordinator* replCoord);
diff --git a/src/mongo/db/repl/repl_client_info.cpp b/src/mongo/db/repl/repl_client_info.cpp
index 3e98a0cb9d3..8064f8c5fc0 100644
--- a/src/mongo/db/repl/repl_client_info.cpp
+++ b/src/mongo/db/repl/repl_client_info.cpp
@@ -48,9 +48,10 @@ void ReplClientInfo::setLastOp(const OpTime& ot) {
_lastOp = ot;
}
-void ReplClientInfo::setLastOpToSystemLastOpTime(OperationContext* txn) {
- ReplicationCoordinator* replCoord = repl::ReplicationCoordinator::get(txn->getServiceContext());
- if (replCoord->isReplEnabled() && txn->writesAreReplicated()) {
+void ReplClientInfo::setLastOpToSystemLastOpTime(OperationContext* opCtx) {
+ ReplicationCoordinator* replCoord =
+ repl::ReplicationCoordinator::get(opCtx->getServiceContext());
+ if (replCoord->isReplEnabled() && opCtx->writesAreReplicated()) {
setLastOp(replCoord->getMyLastAppliedOpTime());
}
}
diff --git a/src/mongo/db/repl/repl_client_info.h b/src/mongo/db/repl/repl_client_info.h
index 3c81953d65f..3c3910cf78d 100644
--- a/src/mongo/db/repl/repl_client_info.h
+++ b/src/mongo/db/repl/repl_client_info.h
@@ -76,7 +76,7 @@ public:
* This is necessary when doing no-op writes, as we need to set the client's lastOp to a proper
* value for write concern wait to work.
*/
- void setLastOpToSystemLastOpTime(OperationContext* txn);
+ void setLastOpToSystemLastOpTime(OperationContext* opCtx);
private:
static const long long kUninitializedTerm = -1;
diff --git a/src/mongo/db/repl/repl_set_commands.cpp b/src/mongo/db/repl/repl_set_commands.cpp
index c2793e42764..f3a0cd748e2 100644
--- a/src/mongo/db/repl/repl_set_commands.cpp
+++ b/src/mongo/db/repl/repl_set_commands.cpp
@@ -93,7 +93,7 @@ public:
return Status::OK();
}
CmdReplSetTest() : ReplSetCommand("replSetTest") {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
@@ -165,7 +165,7 @@ MONGO_INITIALIZER(RegisterReplSetTestCmd)(InitializerContext* context) {
class CmdReplSetGetRBID : public ReplSetCommand {
public:
CmdReplSetGetRBID() : ReplSetCommand("replSetGetRBID") {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
@@ -188,14 +188,14 @@ public:
help << "\nhttp://dochub.mongodb.org/core/replicasetcommands";
}
CmdReplSetGetStatus() : ReplSetCommand("replSetGetStatus", true) {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
string& errmsg,
BSONObjBuilder& result) {
if (cmdObj["forShell"].trueValue())
- LastError::get(txn->getClient()).disable();
+ LastError::get(opCtx->getClient()).disable();
Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
if (!status.isOK())
@@ -230,7 +230,7 @@ public:
help << "\nhttp://dochub.mongodb.org/core/replicasetcommands";
}
CmdReplSetGetConfig() : ReplSetCommand("replSetGetConfig", true) {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
@@ -330,7 +330,7 @@ public:
h << "Initiate/christen a replica set.";
h << "\nhttp://dochub.mongodb.org/core/replicasetcommands";
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
@@ -342,7 +342,7 @@ public:
}
std::string replSetString =
- ReplicationCoordinator::get(txn)->getSettings().getReplSetString();
+ ReplicationCoordinator::get(opCtx)->getSettings().getReplSetString();
if (replSetString.empty()) {
return appendCommandStatus(result,
Status(ErrorCodes::NoReplicationEnabled,
@@ -356,7 +356,7 @@ public:
result.append("info2", noConfigMessage);
log() << "initiate : " << noConfigMessage;
- ReplicationCoordinatorExternalStateImpl externalState(StorageInterface::get(txn));
+ ReplicationCoordinatorExternalStateImpl externalState(StorageInterface::get(opCtx));
std::string name;
std::vector<HostAndPort> seeds;
parseReplSetSeedList(&externalState, replSetString, &name, &seeds); // may throw...
@@ -386,7 +386,7 @@ public:
}
Status status =
- getGlobalReplicationCoordinator()->processReplSetInitiate(txn, configObj, &result);
+ getGlobalReplicationCoordinator()->processReplSetInitiate(opCtx, configObj, &result);
return appendCommandStatus(result, status);
}
@@ -404,7 +404,7 @@ public:
help << "\nhttp://dochub.mongodb.org/core/replicasetcommands";
}
CmdReplSetReconfig() : ReplSetCommand("replSetReconfig") {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
@@ -424,15 +424,15 @@ public:
parsedArgs.newConfigObj = cmdObj["replSetReconfig"].Obj();
parsedArgs.force = cmdObj.hasField("force") && cmdObj["force"].trueValue();
status =
- getGlobalReplicationCoordinator()->processReplSetReconfig(txn, parsedArgs, &result);
+ getGlobalReplicationCoordinator()->processReplSetReconfig(opCtx, parsedArgs, &result);
- ScopedTransaction scopedXact(txn, MODE_X);
- Lock::GlobalWrite globalWrite(txn->lockState());
+ ScopedTransaction scopedXact(opCtx, MODE_X);
+ Lock::GlobalWrite globalWrite(opCtx->lockState());
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
if (status.isOK() && !parsedArgs.force) {
getGlobalServiceContext()->getOpObserver()->onOpMessage(
- txn,
+ opCtx,
BSON("msg"
<< "Reconfig set"
<< "version"
@@ -462,7 +462,7 @@ public:
help << "\nhttp://dochub.mongodb.org/core/replicasetcommands";
}
CmdReplSetFreeze() : ReplSetCommand("replSetFreeze") {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
@@ -494,7 +494,7 @@ public:
help << "http://dochub.mongodb.org/core/replicasetcommands";
}
CmdReplSetStepDown() : ReplSetCommand("replSetStepDown") {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
@@ -543,7 +543,7 @@ public:
log() << "Attempting to step down in response to replSetStepDown command";
status = getGlobalReplicationCoordinator()->stepDown(
- txn, force, Seconds(secondaryCatchUpPeriodSecs), Seconds(stepDownForSecs));
+ opCtx, force, Seconds(secondaryCatchUpPeriodSecs), Seconds(stepDownForSecs));
return appendCommandStatus(result, status);
}
@@ -560,7 +560,7 @@ public:
help << "Enable or disable maintenance mode.";
}
CmdReplSetMaintenance() : ReplSetCommand("replSetMaintenance") {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
@@ -589,7 +589,7 @@ public:
"in-progress initial sync.";
}
CmdReplSetSyncFrom() : ReplSetCommand("replSetSyncFrom") {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
@@ -606,7 +606,7 @@ public:
return appendCommandStatus(result,
getGlobalReplicationCoordinator()->processReplSetSyncFrom(
- txn, targetHostAndPort, &result));
+ opCtx, targetHostAndPort, &result));
}
private:
@@ -618,13 +618,13 @@ private:
class CmdReplSetUpdatePosition : public ReplSetCommand {
public:
CmdReplSetUpdatePosition() : ReplSetCommand("replSetUpdatePosition") {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
string& errmsg,
BSONObjBuilder& result) {
- auto replCoord = repl::ReplicationCoordinator::get(txn->getClient()->getServiceContext());
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx->getClient()->getServiceContext());
Status status = replCoord->checkReplEnabledForCommand(&result);
if (!status.isOK())
@@ -684,7 +684,7 @@ namespace {
* The "local" database does NOT count except for "rs.oplog" collection.
* Used to set the hasData field on replset heartbeat command response.
*/
-bool replHasDatabases(OperationContext* txn) {
+bool replHasDatabases(OperationContext* opCtx) {
std::vector<string> names;
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
storageEngine->listDatabases(&names);
@@ -697,7 +697,7 @@ bool replHasDatabases(OperationContext* txn) {
// we have a local database. return true if oplog isn't empty
BSONObj o;
- if (Helpers::getSingleton(txn, repl::rsOplogName.c_str(), o)) {
+ if (Helpers::getSingleton(opCtx, repl::rsOplogName.c_str(), o)) {
return true;
}
}
@@ -718,7 +718,7 @@ MONGO_FP_DECLARE(rsDelayHeartbeatResponse);
class CmdReplSetHeartbeat : public ReplSetCommand {
public:
CmdReplSetHeartbeat() : ReplSetCommand("replSetHeartbeat") {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
@@ -740,7 +740,7 @@ public:
/* we want to keep heartbeat connections open when relinquishing primary.
tag them here. */
transport::Session::TagMask originalTag = 0;
- auto session = txn->getClient()->session();
+ auto session = opCtx->getClient()->session();
if (session) {
originalTag = session->getTags();
session->replaceTags(originalTag | transport::Session::kKeepOpen);
@@ -777,7 +777,7 @@ public:
// ugh.
if (args.getCheckEmpty()) {
- result.append("hasData", replHasDatabases(txn));
+ result.append("hasData", replHasDatabases(opCtx));
}
ReplSetHeartbeatResponse response;
@@ -795,7 +795,7 @@ class CmdReplSetFresh : public ReplSetCommand {
public:
CmdReplSetFresh() : ReplSetCommand("replSetFresh") {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
@@ -828,7 +828,7 @@ public:
CmdReplSetElect() : ReplSetCommand("replSetElect") {}
private:
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
@@ -862,7 +862,7 @@ class CmdReplSetStepUp : public ReplSetCommand {
public:
CmdReplSetStepUp() : ReplSetCommand("replSetStepUp") {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
diff --git a/src/mongo/db/repl/repl_set_request_votes.cpp b/src/mongo/db/repl/repl_set_request_votes.cpp
index 02eb5311cb3..5577952807d 100644
--- a/src/mongo/db/repl/repl_set_request_votes.cpp
+++ b/src/mongo/db/repl/repl_set_request_votes.cpp
@@ -47,7 +47,7 @@ public:
CmdReplSetRequestVotes() : ReplSetCommand("replSetRequestVotes") {}
private:
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string&,
BSONObj& cmdObj,
int,
@@ -67,7 +67,7 @@ private:
// We want to keep request vote connection open when relinquishing primary.
// Tag it here.
transport::Session::TagMask originalTag = 0;
- auto session = txn->getClient()->session();
+ auto session = opCtx->getClient()->session();
if (session) {
originalTag = session->getTags();
session->replaceTags(originalTag | transport::Session::kKeepOpen);
@@ -82,7 +82,7 @@ private:
ReplSetRequestVotesResponse response;
status = getGlobalReplicationCoordinator()->processReplSetRequestVotes(
- txn, parsedArgs, &response);
+ opCtx, parsedArgs, &response);
response.addToBSON(&result);
return appendCommandStatus(result, status);
}
diff --git a/src/mongo/db/repl/repl_set_web_handler.cpp b/src/mongo/db/repl/repl_set_web_handler.cpp
index 3f67cd9a45c..404dc659ae7 100644
--- a/src/mongo/db/repl/repl_set_web_handler.cpp
+++ b/src/mongo/db/repl/repl_set_web_handler.cpp
@@ -51,7 +51,7 @@ public:
return str::startsWith(url, "/_replSet");
}
- virtual void handle(OperationContext* txn,
+ virtual void handle(OperationContext* opCtx,
const char* rq,
const std::string& url,
BSONObj params,
@@ -59,12 +59,12 @@ public:
int& responseCode,
std::vector<std::string>& headers,
const SockAddr& from) {
- responseMsg = _replSet(txn);
+ responseMsg = _replSet(opCtx);
responseCode = 200;
}
/* /_replSet show replica set status in html format */
- std::string _replSet(OperationContext* txn) {
+ std::string _replSet(OperationContext* opCtx) {
std::stringstream s;
s << start("Replica Set Status " + prettyHostName());
s << p(a("/", "back", "Home") + " | " +
diff --git a/src/mongo/db/repl/replication_coordinator.h b/src/mongo/db/repl/replication_coordinator.h
index 1d3dda6f5b6..d5557213bb4 100644
--- a/src/mongo/db/repl/replication_coordinator.h
+++ b/src/mongo/db/repl/replication_coordinator.h
@@ -123,14 +123,14 @@ public:
* components of the replication system to start up whatever threads and do whatever
* initialization they need.
*/
- virtual void startup(OperationContext* txn) = 0;
+ virtual void startup(OperationContext* opCtx) = 0;
/**
* Does whatever cleanup is required to stop replication, including instructing the other
* components of the replication system to shut down and stop any threads they are using,
* blocking until all replication-related shutdown tasks are complete.
*/
- virtual void shutdown(OperationContext* txn) = 0;
+ virtual void shutdown(OperationContext* opCtx) = 0;
/**
* Returns a pointer to the ReplicationExecutor.
@@ -194,7 +194,7 @@ public:
* writeConcern.wTimeout of -1 indicates return immediately after checking. Return codes:
* ErrorCodes::WriteConcernFailed if the writeConcern.wTimeout is reached before
* the data has been sufficiently replicated
- * ErrorCodes::ExceededTimeLimit if the txn->getMaxTimeMicrosRemaining is reached before
+ * ErrorCodes::ExceededTimeLimit if the opCtx->getMaxTimeMicrosRemaining is reached before
* the data has been sufficiently replicated
* ErrorCodes::NotMaster if the node is not Primary/Master
* ErrorCodes::UnknownReplWriteConcern if the writeConcern.wMode contains a write concern
@@ -202,16 +202,16 @@ public:
* ErrorCodes::ShutdownInProgress if we are mid-shutdown
* ErrorCodes::Interrupted if the operation was killed with killop()
*/
- virtual StatusAndDuration awaitReplication(OperationContext* txn,
+ virtual StatusAndDuration awaitReplication(OperationContext* opCtx,
const OpTime& opTime,
const WriteConcernOptions& writeConcern) = 0;
/**
* Like awaitReplication(), above, but waits for the replication of the last operation
- * performed on the client associated with "txn".
+ * performed on the client associated with "opCtx".
*/
virtual StatusAndDuration awaitReplicationOfLastOpForClient(
- OperationContext* txn, const WriteConcernOptions& writeConcern) = 0;
+ OperationContext* opCtx, const WriteConcernOptions& writeConcern) = 0;
/**
* Causes this node to relinquish being primary for at least 'stepdownTime'. If 'force' is
@@ -222,7 +222,7 @@ public:
* ErrorCodes::SecondaryAheadOfPrimary if we are primary but there is another node that
* seems to be ahead of us in replication, and Status::OK otherwise.
*/
- virtual Status stepDown(OperationContext* txn,
+ virtual Status stepDown(OperationContext* opCtx,
bool force,
const Milliseconds& waitTime,
const Milliseconds& stepdownTime) = 0;
@@ -245,14 +245,14 @@ public:
* NOTE: This function can only be meaningfully called while the caller holds the global
* lock in some mode other than MODE_NONE.
*/
- virtual bool canAcceptWritesForDatabase(OperationContext* txn, StringData dbName) = 0;
+ virtual bool canAcceptWritesForDatabase(OperationContext* opCtx, StringData dbName) = 0;
/**
* Version which does not check for the global lock. Do not use in new code.
* Without the global lock held, the return value may be inaccurate by the time
* the function returns.
*/
- virtual bool canAcceptWritesForDatabase_UNSAFE(OperationContext* txn, StringData dbName) = 0;
+ virtual bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, StringData dbName) = 0;
/**
* Returns true if it is valid for this node to accept writes on the given namespace.
@@ -260,14 +260,14 @@ public:
* The result of this function should be consistent with canAcceptWritesForDatabase()
* for the database the namespace refers to, with additional checks on the collection.
*/
- virtual bool canAcceptWritesFor(OperationContext* txn, const NamespaceString& ns) = 0;
+ virtual bool canAcceptWritesFor(OperationContext* opCtx, const NamespaceString& ns) = 0;
/**
* Version which does not check for the global lock. Do not use in new code.
* Without the global lock held, the return value may be inaccurate by the time
* the function returns.
*/
- virtual bool canAcceptWritesFor_UNSAFE(OperationContext* txn, const NamespaceString& ns) = 0;
+ virtual bool canAcceptWritesFor_UNSAFE(OperationContext* opCtx, const NamespaceString& ns) = 0;
/**
* Checks if the current replica set configuration can satisfy the given write concern.
@@ -284,7 +284,7 @@ public:
* Returns Status::OK() if it is valid for this node to serve reads on the given collection
* and an errorcode indicating why the node cannot if it cannot.
*/
- virtual Status checkCanServeReadsFor(OperationContext* txn,
+ virtual Status checkCanServeReadsFor(OperationContext* opCtx,
const NamespaceString& ns,
bool slaveOk) = 0;
@@ -293,7 +293,7 @@ public:
* Without the global lock held, the return value may be inaccurate by the time
* the function returns.
*/
- virtual Status checkCanServeReadsFor_UNSAFE(OperationContext* txn,
+ virtual Status checkCanServeReadsFor_UNSAFE(OperationContext* opCtx,
const NamespaceString& ns,
bool slaveOk) = 0;
@@ -303,7 +303,8 @@ public:
* The namespace "ns" is passed in because the "local" database is usually writable
* and we need to enforce the constraints for it.
*/
- virtual bool shouldRelaxIndexConstraints(OperationContext* txn, const NamespaceString& ns) = 0;
+ virtual bool shouldRelaxIndexConstraints(OperationContext* opCtx,
+ const NamespaceString& ns) = 0;
/**
* Updates our internal tracking of the last OpTime applied for the given slave
@@ -378,7 +379,7 @@ public:
*
* Returns whether the wait was successful.
*/
- virtual Status waitUntilOpTimeForRead(OperationContext* txn,
+ virtual Status waitUntilOpTimeForRead(OperationContext* opCtx,
const ReadConcernArgs& settings) = 0;
/**
@@ -486,7 +487,7 @@ public:
* steps down and steps up so quickly that the applier signals drain complete in the wrong
* term.
*/
- virtual void signalDrainComplete(OperationContext* txn, long long termWhenBufferIsEmpty) = 0;
+ virtual void signalDrainComplete(OperationContext* opCtx, long long termWhenBufferIsEmpty) = 0;
/**
* Waits duration of 'timeout' for applier to finish draining its buffer of operations.
@@ -527,7 +528,7 @@ public:
/**
* Does an initial sync of data, after dropping existing data.
*/
- virtual Status resyncData(OperationContext* txn, bool waitUntilCompleted) = 0;
+ virtual Status resyncData(OperationContext* opCtx, bool waitUntilCompleted) = 0;
/**
* Handles an incoming isMaster command for a replica set node. Should not be
@@ -592,7 +593,7 @@ public:
* returns Status::OK if the sync target could be set and an ErrorCode indicating why it
* couldn't otherwise.
*/
- virtual Status processReplSetSyncFrom(OperationContext* txn,
+ virtual Status processReplSetSyncFrom(OperationContext* opCtx,
const HostAndPort& target,
BSONObjBuilder* resultObj) = 0;
@@ -625,7 +626,7 @@ public:
* Handles an incoming replSetReconfig command. Adds BSON to 'resultObj';
* returns a Status with either OK or an error message.
*/
- virtual Status processReplSetReconfig(OperationContext* txn,
+ virtual Status processReplSetReconfig(OperationContext* opCtx,
const ReplSetReconfigArgs& args,
BSONObjBuilder* resultObj) = 0;
@@ -634,7 +635,7 @@ public:
* configuration to use.
* Adds BSON to 'resultObj'; returns a Status with either OK or an error message.
*/
- virtual Status processReplSetInitiate(OperationContext* txn,
+ virtual Status processReplSetInitiate(OperationContext* opCtx,
const BSONObj& configObj,
BSONObjBuilder* resultObj) = 0;
@@ -710,7 +711,7 @@ public:
*
* Returns ErrorCodes::IllegalOperation if we're not running with master/slave replication.
*/
- virtual Status processHandshake(OperationContext* txn, const HandshakeArgs& handshake) = 0;
+ virtual Status processHandshake(OperationContext* opCtx, const HandshakeArgs& handshake) = 0;
/**
* Returns a bool indicating whether or not this node builds indexes.
@@ -749,7 +750,7 @@ public:
* Loads the optime from the last op in the oplog into the coordinator's lastAppliedOpTime and
* lastDurableOpTime values.
*/
- virtual void resetLastOpTimesFromOplog(OperationContext* txn) = 0;
+ virtual void resetLastOpTimesFromOplog(OperationContext* opCtx) = 0;
/**
* Returns the OpTime of the latest replica set-committed op known to this server.
@@ -762,7 +763,7 @@ public:
* Handles an incoming replSetRequestVotes command.
* Adds BSON to 'resultObj'; returns a Status with either OK or an error message.
*/
- virtual Status processReplSetRequestVotes(OperationContext* txn,
+ virtual Status processReplSetRequestVotes(OperationContext* opCtx,
const ReplSetRequestVotesArgs& args,
ReplSetRequestVotesResponse* response) = 0;
@@ -803,7 +804,7 @@ public:
* the rest of the work, because the term is still the same).
* Returns StaleTerm if the supplied term was higher than the current term.
*/
- virtual Status updateTerm(OperationContext* txn, long long term) = 0;
+ virtual Status updateTerm(OperationContext* opCtx, long long term) = 0;
/**
* Reserves a unique SnapshotName.
@@ -819,7 +820,7 @@ public:
* A null OperationContext can be used in cases where the snapshot to wait for should not be
* adjusted.
*/
- virtual SnapshotName reserveSnapshotName(OperationContext* txn) = 0;
+ virtual SnapshotName reserveSnapshotName(OperationContext* opCtx) = 0;
/**
* Signals the SnapshotThread, if running, to take a forced snapshot even if the global
@@ -833,16 +834,16 @@ public:
* Creates a new snapshot in the storage engine and registers it for use in the replication
* coordinator.
*/
- virtual void createSnapshot(OperationContext* txn,
+ virtual void createSnapshot(OperationContext* opCtx,
OpTime timeOfSnapshot,
SnapshotName name) = 0;
/**
* Blocks until either the current committed snapshot is at least as high as 'untilSnapshot',
* or we are interrupted for any reason, including shutdown or maxTimeMs expiration.
- * 'txn' is used to checkForInterrupt and enforce maxTimeMS.
+ * 'opCtx' is used to checkForInterrupt and enforce maxTimeMS.
*/
- virtual void waitUntilSnapshotCommitted(OperationContext* txn,
+ virtual void waitUntilSnapshotCommitted(OperationContext* opCtx,
const SnapshotName& untilSnapshot) = 0;
/**
diff --git a/src/mongo/db/repl/replication_coordinator_external_state.h b/src/mongo/db/repl/replication_coordinator_external_state.h
index 66283518bfe..05f07b8aada 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state.h
@@ -60,7 +60,7 @@ class ReplSettings;
class ReplicationCoordinator;
class ReplicationExecutor;
-using OnInitialSyncFinishedFn = stdx::function<void(OperationContext* txn)>;
+using OnInitialSyncFinishedFn = stdx::function<void(OperationContext* opCtx)>;
using StartInitialSyncFn = stdx::function<void(OnInitialSyncFinishedFn callback)>;
using StartSteadyReplicationFn = stdx::function<void()>;
/**
@@ -94,33 +94,33 @@ public:
/**
* Returns true if an incomplete initial sync is detected.
*/
- virtual bool isInitialSyncFlagSet(OperationContext* txn) = 0;
+ virtual bool isInitialSyncFlagSet(OperationContext* opCtx) = 0;
/**
* Starts steady state sync for replica set member -- legacy impl not in DataReplicator.
*
* NOTE: Use either this or the Master/Slave version, but not both.
*/
- virtual void startSteadyStateReplication(OperationContext* txn,
+ virtual void startSteadyStateReplication(OperationContext* opCtx,
ReplicationCoordinator* replCoord) = 0;
- virtual void runOnInitialSyncThread(stdx::function<void(OperationContext* txn)> run) = 0;
+ virtual void runOnInitialSyncThread(stdx::function<void(OperationContext* opCtx)> run) = 0;
/**
* Stops the data replication threads = bgsync, applier, reporter.
*/
- virtual void stopDataReplication(OperationContext* txn) = 0;
+ virtual void stopDataReplication(OperationContext* opCtx) = 0;
/**
* Starts the Master/Slave threads and sets up logOp
*/
- virtual void startMasterSlave(OperationContext* txn) = 0;
+ virtual void startMasterSlave(OperationContext* opCtx) = 0;
/**
* Performs any necessary external state specific shutdown tasks, such as cleaning up
* the threads it started.
*/
- virtual void shutdown(OperationContext* txn) = 0;
+ virtual void shutdown(OperationContext* opCtx) = 0;
/**
* Returns task executor for scheduling tasks to be run asynchronously.
@@ -136,12 +136,12 @@ public:
* Runs the repair database command on the "local" db, if the storage engine is MMapV1.
* Note: Used after initial sync to compact the database files.
*/
- virtual Status runRepairOnLocalDB(OperationContext* txn) = 0;
+ virtual Status runRepairOnLocalDB(OperationContext* opCtx) = 0;
/**
* Creates the oplog, writes the first entry and stores the replica set config document.
*/
- virtual Status initializeReplSetStorage(OperationContext* txn, const BSONObj& config) = 0;
+ virtual Status initializeReplSetStorage(OperationContext* opCtx, const BSONObj& config) = 0;
/**
* Called when a node on way to becoming a primary is ready to leave drain mode. It is called
@@ -149,7 +149,7 @@ public:
*
* Throws on errors.
*/
- virtual void onDrainComplete(OperationContext* txn) = 0;
+ virtual void onDrainComplete(OperationContext* opCtx) = 0;
/**
* Called as part of the process of transitioning to primary and run with the global X lock and
@@ -163,7 +163,7 @@ public:
*
* Throws on errors.
*/
- virtual OpTime onTransitionToPrimary(OperationContext* txn, bool isV1ElectionProtocol) = 0;
+ virtual OpTime onTransitionToPrimary(OperationContext* opCtx, bool isV1ElectionProtocol) = 0;
/**
* Simple wrapper around SyncSourceFeedback::forwardSlaveProgress. Signals to the
@@ -188,22 +188,23 @@ public:
/**
* Gets the replica set config document from local storage, or returns an error.
*/
- virtual StatusWith<BSONObj> loadLocalConfigDocument(OperationContext* txn) = 0;
+ virtual StatusWith<BSONObj> loadLocalConfigDocument(OperationContext* opCtx) = 0;
/**
* Stores the replica set config document in local storage, or returns an error.
*/
- virtual Status storeLocalConfigDocument(OperationContext* txn, const BSONObj& config) = 0;
+ virtual Status storeLocalConfigDocument(OperationContext* opCtx, const BSONObj& config) = 0;
/**
* Gets the replica set lastVote document from local storage, or returns an error.
*/
- virtual StatusWith<LastVote> loadLocalLastVoteDocument(OperationContext* txn) = 0;
+ virtual StatusWith<LastVote> loadLocalLastVoteDocument(OperationContext* opCtx) = 0;
/**
* Stores the replica set lastVote document in local storage, or returns an error.
*/
- virtual Status storeLocalLastVoteDocument(OperationContext* txn, const LastVote& lastVote) = 0;
+ virtual Status storeLocalLastVoteDocument(OperationContext* opCtx,
+ const LastVote& lastVote) = 0;
/**
* Sets the global opTime to be 'newTime'.
@@ -214,20 +215,20 @@ public:
* Gets the last optime of an operation performed on this host, from stable
* storage.
*/
- virtual StatusWith<OpTime> loadLastOpTime(OperationContext* txn) = 0;
+ virtual StatusWith<OpTime> loadLastOpTime(OperationContext* opCtx) = 0;
/**
* Cleaning up the oplog, by potentially truncating:
* If we are recovering from a failed batch then minvalid.start though minvalid.end need
* to be removed from the oplog before we can start applying operations.
*/
- virtual void cleanUpLastApplyBatch(OperationContext* txn) = 0;
+ virtual void cleanUpLastApplyBatch(OperationContext* opCtx) = 0;
/**
* Returns the HostAndPort of the remote client connected to us that initiated the operation
- * represented by "txn".
+ * represented by "opCtx".
*/
- virtual HostAndPort getClientHostAndPort(const OperationContext* txn) = 0;
+ virtual HostAndPort getClientHostAndPort(const OperationContext* opCtx) = 0;
/**
* Closes all connections in the given TransportLayer except those marked with the
@@ -240,7 +241,7 @@ public:
* Kills all operations that have a Client that is associated with an incoming user
* connection. Used during stepdown.
*/
- virtual void killAllUserOperations(OperationContext* txn) = 0;
+ virtual void killAllUserOperations(OperationContext* opCtx) = 0;
/**
* Resets any active sharding metadata on this server and stops any sharding-related threads
@@ -279,7 +280,7 @@ public:
/**
* Creates a new snapshot.
*/
- virtual void createSnapshot(OperationContext* txn, SnapshotName name) = 0;
+ virtual void createSnapshot(OperationContext* opCtx, SnapshotName name) = 0;
/**
* Signals the SnapshotThread, if running, to take a forced snapshot even if the global
@@ -305,13 +306,13 @@ public:
/**
* Returns true if the current storage engine supports read committed.
*/
- virtual bool isReadCommittedSupportedByStorageEngine(OperationContext* txn) const = 0;
+ virtual bool isReadCommittedSupportedByStorageEngine(OperationContext* opCtx) const = 0;
/**
* Applies the operations described in the oplog entries contained in "ops" using the
* "applyOperation" function.
*/
- virtual StatusWith<OpTime> multiApply(OperationContext* txn,
+ virtual StatusWith<OpTime> multiApply(OperationContext* opCtx,
MultiApplier::Operations ops,
MultiApplier::ApplyOperationFn applyOperation) = 0;
@@ -333,13 +334,13 @@ public:
* This function creates an oplog buffer of the type specified at server startup.
*/
virtual std::unique_ptr<OplogBuffer> makeInitialSyncOplogBuffer(
- OperationContext* txn) const = 0;
+ OperationContext* opCtx) const = 0;
/**
* Creates an oplog buffer suitable for steady state replication.
*/
virtual std::unique_ptr<OplogBuffer> makeSteadyStateOplogBuffer(
- OperationContext* txn) const = 0;
+ OperationContext* opCtx) const = 0;
/**
* Returns true if the user specified to use the data replicator for initial sync.
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 70c74cc8942..36d8a84fd06 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -195,46 +195,46 @@ ReplicationCoordinatorExternalStateImpl::ReplicationCoordinatorExternalStateImpl
}
ReplicationCoordinatorExternalStateImpl::~ReplicationCoordinatorExternalStateImpl() {}
-bool ReplicationCoordinatorExternalStateImpl::isInitialSyncFlagSet(OperationContext* txn) {
- return _storageInterface->getInitialSyncFlag(txn);
+bool ReplicationCoordinatorExternalStateImpl::isInitialSyncFlagSet(OperationContext* opCtx) {
+ return _storageInterface->getInitialSyncFlag(opCtx);
}
void ReplicationCoordinatorExternalStateImpl::startInitialSync(OnInitialSyncFinishedFn finished) {
- _initialSyncRunner.schedule([finished, this](OperationContext* txn, const Status& status) {
+ _initialSyncRunner.schedule([finished, this](OperationContext* opCtx, const Status& status) {
if (status == ErrorCodes::CallbackCanceled) {
return TaskRunner::NextAction::kDisposeOperationContext;
}
// Do initial sync.
- syncDoInitialSync(txn, this);
- finished(txn);
+ syncDoInitialSync(opCtx, this);
+ finished(opCtx);
return TaskRunner::NextAction::kDisposeOperationContext;
});
}
void ReplicationCoordinatorExternalStateImpl::runOnInitialSyncThread(
- stdx::function<void(OperationContext* txn)> run) {
+ stdx::function<void(OperationContext* opCtx)> run) {
_initialSyncRunner.cancel();
_initialSyncRunner.join();
- _initialSyncRunner.schedule([run, this](OperationContext* txn, const Status& status) {
+ _initialSyncRunner.schedule([run, this](OperationContext* opCtx, const Status& status) {
if (status == ErrorCodes::CallbackCanceled) {
return TaskRunner::NextAction::kDisposeOperationContext;
}
- invariant(txn);
- invariant(txn->getClient());
- run(txn);
+ invariant(opCtx);
+ invariant(opCtx->getClient());
+ run(opCtx);
return TaskRunner::NextAction::kDisposeOperationContext;
});
}
void ReplicationCoordinatorExternalStateImpl::startSteadyStateReplication(
- OperationContext* txn, ReplicationCoordinator* replCoord) {
+ OperationContext* opCtx, ReplicationCoordinator* replCoord) {
LockGuard lk(_threadMutex);
invariant(replCoord);
invariant(!_bgSync);
log() << "Starting replication fetcher thread";
- _bgSync = stdx::make_unique<BackgroundSync>(this, makeSteadyStateOplogBuffer(txn));
- _bgSync->startup(txn);
+ _bgSync = stdx::make_unique<BackgroundSync>(this, makeSteadyStateOplogBuffer(opCtx));
+ _bgSync->startup(opCtx);
log() << "Starting replication applier thread";
invariant(!_applierThread);
@@ -246,12 +246,12 @@ void ReplicationCoordinatorExternalStateImpl::startSteadyStateReplication(
&SyncSourceFeedback::run, &_syncSourceFeedback, _taskExecutor.get(), _bgSync.get())));
}
-void ReplicationCoordinatorExternalStateImpl::stopDataReplication(OperationContext* txn) {
+void ReplicationCoordinatorExternalStateImpl::stopDataReplication(OperationContext* opCtx) {
UniqueLock lk(_threadMutex);
- _stopDataReplication_inlock(txn, &lk);
+ _stopDataReplication_inlock(opCtx, &lk);
}
-void ReplicationCoordinatorExternalStateImpl::_stopDataReplication_inlock(OperationContext* txn,
+void ReplicationCoordinatorExternalStateImpl::_stopDataReplication_inlock(OperationContext* opCtx,
UniqueLock* lock) {
// Make sue no other _stopDataReplication calls are in progress.
_dataReplicationStopped.wait(*lock, [this]() { return !_stoppingDataReplication; });
@@ -270,7 +270,7 @@ void ReplicationCoordinatorExternalStateImpl::_stopDataReplication_inlock(Operat
if (oldBgSync) {
log() << "Stopping replication fetcher thread";
- oldBgSync->shutdown(txn);
+ oldBgSync->shutdown(opCtx);
}
if (oldApplier) {
@@ -279,7 +279,7 @@ void ReplicationCoordinatorExternalStateImpl::_stopDataReplication_inlock(Operat
}
if (oldBgSync) {
- oldBgSync->join(txn);
+ oldBgSync->join(opCtx);
}
_initialSyncRunner.cancel();
@@ -320,25 +320,25 @@ void ReplicationCoordinatorExternalStateImpl::startThreads(const ReplSettings& s
_startedThreads = true;
}
-void ReplicationCoordinatorExternalStateImpl::startMasterSlave(OperationContext* txn) {
- repl::startMasterSlave(txn);
+void ReplicationCoordinatorExternalStateImpl::startMasterSlave(OperationContext* opCtx) {
+ repl::startMasterSlave(opCtx);
}
-void ReplicationCoordinatorExternalStateImpl::shutdown(OperationContext* txn) {
+void ReplicationCoordinatorExternalStateImpl::shutdown(OperationContext* opCtx) {
UniqueLock lk(_threadMutex);
if (_startedThreads) {
- _stopDataReplication_inlock(txn, &lk);
+ _stopDataReplication_inlock(opCtx, &lk);
if (_snapshotThread) {
log() << "Stopping replication snapshot thread";
_snapshotThread->shutdown();
}
- if (_storageInterface->getOplogDeleteFromPoint(txn).isNull() &&
- loadLastOpTime(txn) == _storageInterface->getAppliedThrough(txn)) {
+ if (_storageInterface->getOplogDeleteFromPoint(opCtx).isNull() &&
+ loadLastOpTime(opCtx) == _storageInterface->getAppliedThrough(opCtx)) {
// Clear the appliedThrough marker to indicate we are consistent with the top of the
// oplog.
- _storageInterface->setAppliedThrough(txn, {});
+ _storageInterface->setAppliedThrough(opCtx, {});
}
if (_noopWriter) {
@@ -361,95 +361,95 @@ OldThreadPool* ReplicationCoordinatorExternalStateImpl::getDbWorkThreadPool() co
return _writerPool.get();
}
-Status ReplicationCoordinatorExternalStateImpl::runRepairOnLocalDB(OperationContext* txn) {
+Status ReplicationCoordinatorExternalStateImpl::runRepairOnLocalDB(OperationContext* opCtx) {
try {
- ScopedTransaction scopedXact(txn, MODE_X);
- Lock::GlobalWrite globalWrite(txn->lockState());
+ ScopedTransaction scopedXact(opCtx, MODE_X);
+ Lock::GlobalWrite globalWrite(opCtx->lockState());
StorageEngine* engine = getGlobalServiceContext()->getGlobalStorageEngine();
if (!engine->isMmapV1()) {
return Status::OK();
}
- txn->setReplicatedWrites(false);
- Status status = repairDatabase(txn, engine, localDbName, false, false);
+ opCtx->setReplicatedWrites(false);
+ Status status = repairDatabase(opCtx, engine, localDbName, false, false);
// Open database before returning
- dbHolder().openDb(txn, localDbName);
+ dbHolder().openDb(opCtx, localDbName);
} catch (const DBException& ex) {
return ex.toStatus();
}
return Status::OK();
}
-Status ReplicationCoordinatorExternalStateImpl::initializeReplSetStorage(OperationContext* txn,
+Status ReplicationCoordinatorExternalStateImpl::initializeReplSetStorage(OperationContext* opCtx,
const BSONObj& config) {
try {
- createOplog(txn);
+ createOplog(opCtx);
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction scopedXact(txn, MODE_X);
- Lock::GlobalWrite globalWrite(txn->lockState());
+ ScopedTransaction scopedXact(opCtx, MODE_X);
+ Lock::GlobalWrite globalWrite(opCtx->lockState());
- WriteUnitOfWork wuow(txn);
- Helpers::putSingleton(txn, configCollectionName, config);
+ WriteUnitOfWork wuow(opCtx);
+ Helpers::putSingleton(opCtx, configCollectionName, config);
const auto msgObj = BSON("msg"
<< "initiating set");
- getGlobalServiceContext()->getOpObserver()->onOpMessage(txn, msgObj);
+ getGlobalServiceContext()->getOpObserver()->onOpMessage(opCtx, msgObj);
wuow.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "initiate oplog entry", "local.oplog.rs");
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "initiate oplog entry", "local.oplog.rs");
// This initializes the minvalid document with a null "ts" because older versions (<=3.2)
// get angry if the minValid document is present but doesn't have a "ts" field.
// Consider removing this once we no longer need to support downgrading to 3.2.
- _storageInterface->setMinValidToAtLeast(txn, {});
+ _storageInterface->setMinValidToAtLeast(opCtx, {});
- FeatureCompatibilityVersion::setIfCleanStartup(txn, _storageInterface);
+ FeatureCompatibilityVersion::setIfCleanStartup(opCtx, _storageInterface);
} catch (const DBException& ex) {
return ex.toStatus();
}
return Status::OK();
}
-void ReplicationCoordinatorExternalStateImpl::onDrainComplete(OperationContext* txn) {
- invariant(!txn->lockState()->isLocked());
+void ReplicationCoordinatorExternalStateImpl::onDrainComplete(OperationContext* opCtx) {
+ invariant(!opCtx->lockState()->isLocked());
// If this is a config server node becoming a primary, ensure the balancer is ready to start.
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
// We must ensure the balancer has stopped because it may still be in the process of
// stopping if this node was previously primary.
- Balancer::get(txn)->waitForBalancerToStop();
+ Balancer::get(opCtx)->waitForBalancerToStop();
}
}
-OpTime ReplicationCoordinatorExternalStateImpl::onTransitionToPrimary(OperationContext* txn,
+OpTime ReplicationCoordinatorExternalStateImpl::onTransitionToPrimary(OperationContext* opCtx,
bool isV1ElectionProtocol) {
- invariant(txn->lockState()->isW());
+ invariant(opCtx->lockState()->isW());
// Clear the appliedThrough marker so on startup we'll use the top of the oplog. This must be
// done before we add anything to our oplog.
- invariant(_storageInterface->getOplogDeleteFromPoint(txn).isNull());
- _storageInterface->setAppliedThrough(txn, {});
+ invariant(_storageInterface->getOplogDeleteFromPoint(opCtx).isNull());
+ _storageInterface->setAppliedThrough(opCtx, {});
if (isV1ElectionProtocol) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction scopedXact(txn, MODE_X);
+ ScopedTransaction scopedXact(opCtx, MODE_X);
- WriteUnitOfWork wuow(txn);
- txn->getClient()->getServiceContext()->getOpObserver()->onOpMessage(
- txn,
+ WriteUnitOfWork wuow(opCtx);
+ opCtx->getClient()->getServiceContext()->getOpObserver()->onOpMessage(
+ opCtx,
BSON("msg"
<< "new primary"));
wuow.commit();
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
- txn, "logging transition to primary to oplog", "local.oplog.rs");
+ opCtx, "logging transition to primary to oplog", "local.oplog.rs");
}
- const auto opTimeToReturn = fassertStatusOK(28665, loadLastOpTime(txn));
+ const auto opTimeToReturn = fassertStatusOK(28665, loadLastOpTime(opCtx));
- _shardingOnTransitionToPrimaryHook(txn);
- _dropAllTempCollections(txn);
+ _shardingOnTransitionToPrimaryHook(opCtx);
+ _dropAllTempCollections(opCtx);
serverGlobalParams.featureCompatibility.validateFeaturesAsMaster.store(true);
@@ -460,28 +460,28 @@ void ReplicationCoordinatorExternalStateImpl::forwardSlaveProgress() {
_syncSourceFeedback.forwardSlaveProgress();
}
-OID ReplicationCoordinatorExternalStateImpl::ensureMe(OperationContext* txn) {
+OID ReplicationCoordinatorExternalStateImpl::ensureMe(OperationContext* opCtx) {
std::string myname = getHostName();
OID myRID;
{
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock lock(txn->lockState(), meDatabaseName, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock lock(opCtx->lockState(), meDatabaseName, MODE_X);
BSONObj me;
// local.me is an identifier for a server for getLastError w:2+
// TODO: handle WriteConflictExceptions below
- if (!Helpers::getSingleton(txn, meCollectionName, me) || !me.hasField("host") ||
+ if (!Helpers::getSingleton(opCtx, meCollectionName, me) || !me.hasField("host") ||
me["host"].String() != myname) {
myRID = OID::gen();
// clean out local.me
- Helpers::emptyCollection(txn, meCollectionName);
+ Helpers::emptyCollection(opCtx, meCollectionName);
// repopulate
BSONObjBuilder b;
b.append("_id", myRID);
b.append("host", myname);
- Helpers::putSingleton(txn, meCollectionName, b.done());
+ Helpers::putSingleton(opCtx, meCollectionName, b.done());
} else {
myRID = me["_id"].OID();
}
@@ -490,11 +490,11 @@ OID ReplicationCoordinatorExternalStateImpl::ensureMe(OperationContext* txn) {
}
StatusWith<BSONObj> ReplicationCoordinatorExternalStateImpl::loadLocalConfigDocument(
- OperationContext* txn) {
+ OperationContext* opCtx) {
try {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
BSONObj config;
- if (!Helpers::getSingleton(txn, configCollectionName, config)) {
+ if (!Helpers::getSingleton(opCtx, configCollectionName, config)) {
return StatusWith<BSONObj>(
ErrorCodes::NoMatchingDocument,
str::stream() << "Did not find replica set configuration document in "
@@ -502,33 +502,33 @@ StatusWith<BSONObj> ReplicationCoordinatorExternalStateImpl::loadLocalConfigDocu
}
return StatusWith<BSONObj>(config);
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "load replica set config", configCollectionName);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "load replica set config", configCollectionName);
} catch (const DBException& ex) {
return StatusWith<BSONObj>(ex.toStatus());
}
}
-Status ReplicationCoordinatorExternalStateImpl::storeLocalConfigDocument(OperationContext* txn,
+Status ReplicationCoordinatorExternalStateImpl::storeLocalConfigDocument(OperationContext* opCtx,
const BSONObj& config) {
try {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbWriteLock(txn->lockState(), configDatabaseName, MODE_X);
- Helpers::putSingleton(txn, configCollectionName, config);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dbWriteLock(opCtx->lockState(), configDatabaseName, MODE_X);
+ Helpers::putSingleton(opCtx, configCollectionName, config);
return Status::OK();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "save replica set config", configCollectionName);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "save replica set config", configCollectionName);
} catch (const DBException& ex) {
return ex.toStatus();
}
}
StatusWith<LastVote> ReplicationCoordinatorExternalStateImpl::loadLocalLastVoteDocument(
- OperationContext* txn) {
+ OperationContext* opCtx) {
try {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
BSONObj lastVoteObj;
- if (!Helpers::getSingleton(txn, lastVoteCollectionName, lastVoteObj)) {
+ if (!Helpers::getSingleton(opCtx, lastVoteCollectionName, lastVoteObj)) {
return StatusWith<LastVote>(ErrorCodes::NoMatchingDocument,
str::stream()
<< "Did not find replica set lastVote document in "
@@ -537,41 +537,41 @@ StatusWith<LastVote> ReplicationCoordinatorExternalStateImpl::loadLocalLastVoteD
return LastVote::readFromLastVote(lastVoteObj);
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
- txn, "load replica set lastVote", lastVoteCollectionName);
+ opCtx, "load replica set lastVote", lastVoteCollectionName);
} catch (const DBException& ex) {
return StatusWith<LastVote>(ex.toStatus());
}
}
Status ReplicationCoordinatorExternalStateImpl::storeLocalLastVoteDocument(
- OperationContext* txn, const LastVote& lastVote) {
+ OperationContext* opCtx, const LastVote& lastVote) {
BSONObj lastVoteObj = lastVote.toBSON();
try {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbWriteLock(txn->lockState(), lastVoteDatabaseName, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dbWriteLock(opCtx->lockState(), lastVoteDatabaseName, MODE_X);
// If there is no last vote document, we want to store one. Otherwise, we only want to
// replace it if the new last vote document would have a higher term. We both check
// the term of the current last vote document and insert the new document under the
// DBLock to synchronize the two operations.
BSONObj result;
- bool exists = Helpers::getSingleton(txn, lastVoteCollectionName, result);
+ bool exists = Helpers::getSingleton(opCtx, lastVoteCollectionName, result);
if (!exists) {
- Helpers::putSingleton(txn, lastVoteCollectionName, lastVoteObj);
+ Helpers::putSingleton(opCtx, lastVoteCollectionName, lastVoteObj);
} else {
StatusWith<LastVote> oldLastVoteDoc = LastVote::readFromLastVote(result);
if (!oldLastVoteDoc.isOK()) {
return oldLastVoteDoc.getStatus();
}
if (lastVote.getTerm() > oldLastVoteDoc.getValue().getTerm()) {
- Helpers::putSingleton(txn, lastVoteCollectionName, lastVoteObj);
+ Helpers::putSingleton(opCtx, lastVoteCollectionName, lastVoteObj);
}
}
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
- txn, "save replica set lastVote", lastVoteCollectionName);
- txn->recoveryUnit()->waitUntilDurable();
+ opCtx, "save replica set lastVote", lastVoteCollectionName);
+ opCtx->recoveryUnit()->waitUntilDurable();
return Status::OK();
} catch (const DBException& ex) {
return ex.toStatus();
@@ -583,18 +583,18 @@ void ReplicationCoordinatorExternalStateImpl::setGlobalTimestamp(ServiceContext*
setNewTimestamp(ctx, newTime);
}
-void ReplicationCoordinatorExternalStateImpl::cleanUpLastApplyBatch(OperationContext* txn) {
- if (_storageInterface->getInitialSyncFlag(txn)) {
+void ReplicationCoordinatorExternalStateImpl::cleanUpLastApplyBatch(OperationContext* opCtx) {
+ if (_storageInterface->getInitialSyncFlag(opCtx)) {
return; // Initial Sync will take over so no cleanup is needed.
}
// This initializes the minvalid document with a null "ts" because older versions (<=3.2)
// get angry if the minValid document is present but doesn't have a "ts" field.
// Consider removing this once we no longer need to support downgrading to 3.2.
- _storageInterface->setMinValidToAtLeast(txn, {});
+ _storageInterface->setMinValidToAtLeast(opCtx, {});
- const auto deleteFromPoint = _storageInterface->getOplogDeleteFromPoint(txn);
- const auto appliedThrough = _storageInterface->getAppliedThrough(txn);
+ const auto deleteFromPoint = _storageInterface->getOplogDeleteFromPoint(opCtx);
+ const auto appliedThrough = _storageInterface->getAppliedThrough(opCtx);
const bool needToDeleteEndOfOplog = !deleteFromPoint.isNull() &&
// This version should never have a non-null deleteFromPoint with a null appliedThrough.
@@ -609,9 +609,9 @@ void ReplicationCoordinatorExternalStateImpl::cleanUpLastApplyBatch(OperationCon
!(appliedThrough.getTimestamp() >= deleteFromPoint);
if (needToDeleteEndOfOplog) {
log() << "Removing unapplied entries starting at: " << deleteFromPoint;
- truncateOplogTo(txn, deleteFromPoint);
+ truncateOplogTo(opCtx, deleteFromPoint);
}
- _storageInterface->setOplogDeleteFromPoint(txn, {}); // clear the deleteFromPoint
+ _storageInterface->setOplogDeleteFromPoint(opCtx, {}); // clear the deleteFromPoint
if (appliedThrough.isNull()) {
// No follow-up work to do.
@@ -620,7 +620,7 @@ void ReplicationCoordinatorExternalStateImpl::cleanUpLastApplyBatch(OperationCon
// Check if we have any unapplied ops in our oplog. It is important that this is done after
// deleting the ragged end of the oplog.
- const auto topOfOplog = fassertStatusOK(40290, loadLastOpTime(txn));
+ const auto topOfOplog = fassertStatusOK(40290, loadLastOpTime(opCtx));
if (appliedThrough == topOfOplog) {
return; // We've applied all the valid oplog we have.
} else if (appliedThrough > topOfOplog) {
@@ -632,7 +632,7 @@ void ReplicationCoordinatorExternalStateImpl::cleanUpLastApplyBatch(OperationCon
log() << "Replaying stored operations from " << appliedThrough << " (exclusive) to "
<< topOfOplog << " (inclusive).";
- DBDirectClient db(txn);
+ DBDirectClient db(opCtx);
auto cursor = db.query(rsOplogName,
QUERY("ts" << BSON("$gte" << appliedThrough.getTimestamp())),
/*batchSize*/ 0,
@@ -658,28 +658,29 @@ void ReplicationCoordinatorExternalStateImpl::cleanUpLastApplyBatch(OperationCon
}
    // Apply remaining ops one at a time, but don't log them because they are already logged.
- const bool wereWritesReplicated = txn->writesAreReplicated();
- ON_BLOCK_EXIT([&] { txn->setReplicatedWrites(wereWritesReplicated); });
- txn->setReplicatedWrites(false);
+ const bool wereWritesReplicated = opCtx->writesAreReplicated();
+ ON_BLOCK_EXIT([&] { opCtx->setReplicatedWrites(wereWritesReplicated); });
+ opCtx->setReplicatedWrites(false);
while (cursor->more()) {
auto entry = cursor->nextSafe();
- fassertStatusOK(40294, SyncTail::syncApply(txn, entry, true));
+ fassertStatusOK(40294, SyncTail::syncApply(opCtx, entry, true));
_storageInterface->setAppliedThrough(
- txn, fassertStatusOK(40295, OpTime::parseFromOplogEntry(entry)));
+ opCtx, fassertStatusOK(40295, OpTime::parseFromOplogEntry(entry)));
}
}
-StatusWith<OpTime> ReplicationCoordinatorExternalStateImpl::loadLastOpTime(OperationContext* txn) {
+StatusWith<OpTime> ReplicationCoordinatorExternalStateImpl::loadLastOpTime(
+ OperationContext* opCtx) {
// TODO: handle WriteConflictExceptions below
try {
// If we are doing an initial sync do not read from the oplog.
- if (_storageInterface->getInitialSyncFlag(txn)) {
+ if (_storageInterface->getInitialSyncFlag(opCtx)) {
return {ErrorCodes::InitialSyncFailure, "In the middle of an initial sync."};
}
BSONObj oplogEntry;
- if (!Helpers::getLast(txn, rsOplogName.c_str(), oplogEntry)) {
+ if (!Helpers::getLast(opCtx, rsOplogName.c_str(), oplogEntry)) {
return StatusWith<OpTime>(ErrorCodes::NoMatchingDocument,
str::stream() << "Did not find any entries in "
<< rsOplogName);
@@ -711,17 +712,17 @@ bool ReplicationCoordinatorExternalStateImpl::isSelf(const HostAndPort& host, Se
}
HostAndPort ReplicationCoordinatorExternalStateImpl::getClientHostAndPort(
- const OperationContext* txn) {
- return HostAndPort(txn->getClient()->clientAddress(true));
+ const OperationContext* opCtx) {
+ return HostAndPort(opCtx->getClient()->clientAddress(true));
}
void ReplicationCoordinatorExternalStateImpl::closeConnections() {
getGlobalServiceContext()->getTransportLayer()->endAllSessions(transport::Session::kKeepOpen);
}
-void ReplicationCoordinatorExternalStateImpl::killAllUserOperations(OperationContext* txn) {
- ServiceContext* environment = txn->getServiceContext();
- environment->killAllUserOperations(txn, ErrorCodes::InterruptedDueToReplStateChange);
+void ReplicationCoordinatorExternalStateImpl::killAllUserOperations(OperationContext* opCtx) {
+ ServiceContext* environment = opCtx->getServiceContext();
+ environment->killAllUserOperations(opCtx, ErrorCodes::InterruptedDueToReplStateChange);
}
void ReplicationCoordinatorExternalStateImpl::shardingOnStepDownHook() {
@@ -733,8 +734,8 @@ void ReplicationCoordinatorExternalStateImpl::shardingOnStepDownHook() {
}
void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook(
- OperationContext* txn) {
- auto status = ShardingStateRecovery::recover(txn);
+ OperationContext* opCtx) {
+ auto status = ShardingStateRecovery::recover(opCtx);
if (ErrorCodes::isShutdownError(status.code())) {
// Note: callers of this method don't expect exceptions, so throw only unexpected fatal
@@ -745,7 +746,7 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook
fassertStatusOK(40107, status);
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
- status = Grid::get(txn)->catalogManager()->initializeConfigDatabaseIfNeeded(txn);
+ status = Grid::get(opCtx)->catalogManager()->initializeConfigDatabaseIfNeeded(opCtx);
if (!status.isOK() && status != ErrorCodes::AlreadyInitialized) {
if (ErrorCodes::isShutdownError(status.code())) {
// Don't fassert if we're mid-shutdown, let the shutdown happen gracefully.
@@ -768,8 +769,8 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook
// Since we *just* wrote the cluster ID to the config.version document (via
// ShardingCatalogManager::initializeConfigDatabaseIfNeeded), this should always
// succeed.
- status = ClusterIdentityLoader::get(txn)->loadClusterId(
- txn, repl::ReadConcernLevel::kLocalReadConcern);
+ status = ClusterIdentityLoader::get(opCtx)->loadClusterId(
+ opCtx, repl::ReadConcernLevel::kLocalReadConcern);
if (ErrorCodes::isShutdownError(status.code())) {
// Don't fassert if we're mid-shutdown, let the shutdown happen gracefully.
@@ -780,20 +781,20 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook
}
// Free any leftover locks from previous instantiations.
- auto distLockManager = Grid::get(txn)->catalogClient(txn)->getDistLockManager();
- distLockManager->unlockAll(txn, distLockManager->getProcessID());
+ auto distLockManager = Grid::get(opCtx)->catalogClient(opCtx)->getDistLockManager();
+ distLockManager->unlockAll(opCtx, distLockManager->getProcessID());
// If this is a config server node becoming a primary, start the balancer
- Balancer::get(txn)->initiateBalancer(txn);
+ Balancer::get(opCtx)->initiateBalancer(opCtx);
// Generate and upsert random 20 byte key for the LogicalClock's TimeProofService.
// TODO: SERVER-27768
- } else if (ShardingState::get(txn)->enabled()) {
+ } else if (ShardingState::get(opCtx)->enabled()) {
const auto configsvrConnStr =
- Grid::get(txn)->shardRegistry()->getConfigShard()->getConnString();
- auto status = ShardingState::get(txn)->updateShardIdentityConfigString(
- txn, configsvrConnStr.toString());
+ Grid::get(opCtx)->shardRegistry()->getConfigShard()->getConnString();
+ auto status = ShardingState::get(opCtx)->updateShardIdentityConfigString(
+ opCtx, configsvrConnStr.toString());
if (!status.isOK()) {
warning() << "error encountered while trying to update config connection string to "
<< configsvrConnStr << causedBy(status);
@@ -802,7 +803,7 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook
// There is a slight chance that some stale metadata might have been loaded before the latest
// optime has been recovered, so throw out everything that we have up to now
- ShardingState::get(txn)->markCollectionsNotShardedAtStepdown();
+ ShardingState::get(opCtx)->markCollectionsNotShardedAtStepdown();
}
void ReplicationCoordinatorExternalStateImpl::signalApplierToChooseNewSyncSource() {
@@ -826,7 +827,7 @@ void ReplicationCoordinatorExternalStateImpl::startProducerIfStopped() {
}
}
-void ReplicationCoordinatorExternalStateImpl::_dropAllTempCollections(OperationContext* txn) {
+void ReplicationCoordinatorExternalStateImpl::_dropAllTempCollections(OperationContext* opCtx) {
std::vector<std::string> dbNames;
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
storageEngine->listDatabases(&dbNames);
@@ -837,12 +838,12 @@ void ReplicationCoordinatorExternalStateImpl::_dropAllTempCollections(OperationC
if (*it == "local")
continue;
LOG(2) << "Removing temporary collections from " << *it;
- Database* db = dbHolder().get(txn, *it);
+ Database* db = dbHolder().get(opCtx, *it);
// Since we must be holding the global lock during this function, if listDatabases
// returned this dbname, we should be able to get a reference to it - it can't have
// been dropped.
invariant(db);
- db->clearTmpCollections(txn);
+ db->clearTmpCollections(opCtx);
}
}
@@ -857,11 +858,11 @@ void ReplicationCoordinatorExternalStateImpl::updateCommittedSnapshot(SnapshotNa
manager->setCommittedSnapshot(newCommitPoint);
}
-void ReplicationCoordinatorExternalStateImpl::createSnapshot(OperationContext* txn,
+void ReplicationCoordinatorExternalStateImpl::createSnapshot(OperationContext* opCtx,
SnapshotName name) {
auto manager = getGlobalServiceContext()->getGlobalStorageEngine()->getSnapshotManager();
invariant(manager); // This should never be called if there is no SnapshotManager.
- manager->createSnapshot(txn, name);
+ manager->createSnapshot(opCtx, name);
}
void ReplicationCoordinatorExternalStateImpl::forceSnapshotCreation() {
@@ -882,18 +883,18 @@ double ReplicationCoordinatorExternalStateImpl::getElectionTimeoutOffsetLimitFra
}
bool ReplicationCoordinatorExternalStateImpl::isReadCommittedSupportedByStorageEngine(
- OperationContext* txn) const {
- auto storageEngine = txn->getServiceContext()->getGlobalStorageEngine();
+ OperationContext* opCtx) const {
+ auto storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine();
// This should never be called if the storage engine has not been initialized.
invariant(storageEngine);
return storageEngine->getSnapshotManager();
}
StatusWith<OpTime> ReplicationCoordinatorExternalStateImpl::multiApply(
- OperationContext* txn,
+ OperationContext* opCtx,
MultiApplier::Operations ops,
MultiApplier::ApplyOperationFn applyOperation) {
- return repl::multiApply(txn, _writerPool.get(), std::move(ops), applyOperation);
+ return repl::multiApply(opCtx, _writerPool.get(), std::move(ops), applyOperation);
}
Status ReplicationCoordinatorExternalStateImpl::multiSyncApply(MultiApplier::OperationPtrs* ops) {
@@ -915,20 +916,20 @@ Status ReplicationCoordinatorExternalStateImpl::multiInitialSyncApply(
}
std::unique_ptr<OplogBuffer> ReplicationCoordinatorExternalStateImpl::makeInitialSyncOplogBuffer(
- OperationContext* txn) const {
+ OperationContext* opCtx) const {
if (initialSyncOplogBuffer == kCollectionOplogBufferName) {
invariant(initialSyncOplogBufferPeekCacheSize >= 0);
OplogBufferCollection::Options options;
options.peekCacheSize = std::size_t(initialSyncOplogBufferPeekCacheSize);
return stdx::make_unique<OplogBufferProxy>(
- stdx::make_unique<OplogBufferCollection>(StorageInterface::get(txn), options));
+ stdx::make_unique<OplogBufferCollection>(StorageInterface::get(opCtx), options));
} else {
return stdx::make_unique<OplogBufferBlockingQueue>();
}
}
std::unique_ptr<OplogBuffer> ReplicationCoordinatorExternalStateImpl::makeSteadyStateOplogBuffer(
- OperationContext* txn) const {
+ OperationContext* opCtx) const {
return stdx::make_unique<OplogBufferBlockingQueue>();
}
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.h b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
index b0d9487eaaf..16c5adf0be6 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
@@ -64,47 +64,47 @@ public:
virtual ~ReplicationCoordinatorExternalStateImpl();
virtual void startThreads(const ReplSettings& settings) override;
virtual void startInitialSync(OnInitialSyncFinishedFn finished) override;
- virtual void startSteadyStateReplication(OperationContext* txn,
+ virtual void startSteadyStateReplication(OperationContext* opCtx,
ReplicationCoordinator* replCoord) override;
- virtual void stopDataReplication(OperationContext* txn) override;
- virtual void runOnInitialSyncThread(stdx::function<void(OperationContext* txn)> run) override;
+ virtual void stopDataReplication(OperationContext* opCtx) override;
+ virtual void runOnInitialSyncThread(stdx::function<void(OperationContext* opCtx)> run) override;
- virtual bool isInitialSyncFlagSet(OperationContext* txn) override;
+ virtual bool isInitialSyncFlagSet(OperationContext* opCtx) override;
- virtual void startMasterSlave(OperationContext* txn);
- virtual void shutdown(OperationContext* txn);
+ virtual void startMasterSlave(OperationContext* opCtx);
+ virtual void shutdown(OperationContext* opCtx);
virtual executor::TaskExecutor* getTaskExecutor() const override;
virtual OldThreadPool* getDbWorkThreadPool() const override;
- virtual Status runRepairOnLocalDB(OperationContext* txn) override;
- virtual Status initializeReplSetStorage(OperationContext* txn, const BSONObj& config);
- void onDrainComplete(OperationContext* txn) override;
- OpTime onTransitionToPrimary(OperationContext* txn, bool isV1ElectionProtocol) override;
+ virtual Status runRepairOnLocalDB(OperationContext* opCtx) override;
+ virtual Status initializeReplSetStorage(OperationContext* opCtx, const BSONObj& config);
+ void onDrainComplete(OperationContext* opCtx) override;
+ OpTime onTransitionToPrimary(OperationContext* opCtx, bool isV1ElectionProtocol) override;
virtual void forwardSlaveProgress();
- virtual OID ensureMe(OperationContext* txn);
+ virtual OID ensureMe(OperationContext* opCtx);
virtual bool isSelf(const HostAndPort& host, ServiceContext* service);
- virtual StatusWith<BSONObj> loadLocalConfigDocument(OperationContext* txn);
- virtual Status storeLocalConfigDocument(OperationContext* txn, const BSONObj& config);
- virtual StatusWith<LastVote> loadLocalLastVoteDocument(OperationContext* txn);
- virtual Status storeLocalLastVoteDocument(OperationContext* txn, const LastVote& lastVote);
+ virtual StatusWith<BSONObj> loadLocalConfigDocument(OperationContext* opCtx);
+ virtual Status storeLocalConfigDocument(OperationContext* opCtx, const BSONObj& config);
+ virtual StatusWith<LastVote> loadLocalLastVoteDocument(OperationContext* opCtx);
+ virtual Status storeLocalLastVoteDocument(OperationContext* opCtx, const LastVote& lastVote);
virtual void setGlobalTimestamp(ServiceContext* service, const Timestamp& newTime);
- virtual StatusWith<OpTime> loadLastOpTime(OperationContext* txn);
- virtual void cleanUpLastApplyBatch(OperationContext* txn);
- virtual HostAndPort getClientHostAndPort(const OperationContext* txn);
+ virtual StatusWith<OpTime> loadLastOpTime(OperationContext* opCtx);
+ virtual void cleanUpLastApplyBatch(OperationContext* opCtx);
+ virtual HostAndPort getClientHostAndPort(const OperationContext* opCtx);
virtual void closeConnections();
- virtual void killAllUserOperations(OperationContext* txn);
+ virtual void killAllUserOperations(OperationContext* opCtx);
virtual void shardingOnStepDownHook();
virtual void signalApplierToChooseNewSyncSource();
virtual void stopProducer();
virtual void startProducerIfStopped();
void dropAllSnapshots() final;
void updateCommittedSnapshot(SnapshotName newCommitPoint) final;
- void createSnapshot(OperationContext* txn, SnapshotName name) final;
+ void createSnapshot(OperationContext* opCtx, SnapshotName name) final;
void forceSnapshotCreation() final;
virtual bool snapshotsEnabled() const;
virtual void notifyOplogMetadataWaiters();
virtual double getElectionTimeoutOffsetLimitFraction() const;
- virtual bool isReadCommittedSupportedByStorageEngine(OperationContext* txn) const;
- virtual StatusWith<OpTime> multiApply(OperationContext* txn,
+ virtual bool isReadCommittedSupportedByStorageEngine(OperationContext* opCtx) const;
+ virtual StatusWith<OpTime> multiApply(OperationContext* opCtx,
MultiApplier::Operations ops,
MultiApplier::ApplyOperationFn applyOperation) override;
virtual Status multiSyncApply(MultiApplier::OperationPtrs* ops) override;
@@ -112,9 +112,9 @@ public:
const HostAndPort& source,
AtomicUInt32* fetchCount) override;
virtual std::unique_ptr<OplogBuffer> makeInitialSyncOplogBuffer(
- OperationContext* txn) const override;
+ OperationContext* opCtx) const override;
virtual std::unique_ptr<OplogBuffer> makeSteadyStateOplogBuffer(
- OperationContext* txn) const override;
+ OperationContext* opCtx) const override;
virtual bool shouldUseDataReplicatorInitialSync() const override;
virtual std::size_t getOplogFetcherMaxFetcherRestarts() const override;
@@ -130,7 +130,7 @@ private:
/**
* Stops data replication and returns with 'lock' locked.
*/
- void _stopDataReplication_inlock(OperationContext* txn, UniqueLock* lock);
+ void _stopDataReplication_inlock(OperationContext* opCtx, UniqueLock* lock);
/**
* Called when the instance transitions to primary in order to notify a potentially sharded host
@@ -138,15 +138,15 @@ private:
*
* Throws on errors.
*/
- void _shardingOnTransitionToPrimaryHook(OperationContext* txn);
+ void _shardingOnTransitionToPrimaryHook(OperationContext* opCtx);
/**
* Drops all temporary collections on all databases except "local".
*
* The implementation may assume that the caller has acquired the global exclusive lock
- * for "txn".
+ * for "opCtx".
*/
- void _dropAllTempCollections(OperationContext* txn);
+ void _dropAllTempCollections(OperationContext* opCtx);
// Guards starting threads and setting _startedThreads
stdx::mutex _threadMutex;
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp b/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
index 274806326a6..87e0e8c6af6 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
@@ -61,7 +61,7 @@ ReplicationCoordinatorExternalStateMock::ReplicationCoordinatorExternalStateMock
ReplicationCoordinatorExternalStateMock::~ReplicationCoordinatorExternalStateMock() {}
void ReplicationCoordinatorExternalStateMock::runOnInitialSyncThread(
- stdx::function<void(OperationContext* txn)> run) {
+ stdx::function<void(OperationContext* opCtx)> run) {
log() << "not running initial sync during test.";
}
@@ -83,13 +83,13 @@ void ReplicationCoordinatorExternalStateMock::stopDataReplication(OperationConte
void ReplicationCoordinatorExternalStateMock::startMasterSlave(OperationContext*) {}
-Status ReplicationCoordinatorExternalStateMock::runRepairOnLocalDB(OperationContext* txn) {
+Status ReplicationCoordinatorExternalStateMock::runRepairOnLocalDB(OperationContext* opCtx) {
return Status::OK();
}
-Status ReplicationCoordinatorExternalStateMock::initializeReplSetStorage(OperationContext* txn,
+Status ReplicationCoordinatorExternalStateMock::initializeReplSetStorage(OperationContext* opCtx,
const BSONObj& config) {
- return storeLocalConfigDocument(txn, config);
+ return storeLocalConfigDocument(opCtx, config);
}
void ReplicationCoordinatorExternalStateMock::shutdown(OperationContext*) {}
@@ -118,7 +118,7 @@ void ReplicationCoordinatorExternalStateMock::addSelf(const HostAndPort& host) {
}
HostAndPort ReplicationCoordinatorExternalStateMock::getClientHostAndPort(
- const OperationContext* txn) {
+ const OperationContext* opCtx) {
return _clientHostAndPort;
}
@@ -128,11 +128,11 @@ void ReplicationCoordinatorExternalStateMock::setClientHostAndPort(
}
StatusWith<BSONObj> ReplicationCoordinatorExternalStateMock::loadLocalConfigDocument(
- OperationContext* txn) {
+ OperationContext* opCtx) {
return _localRsConfigDocument;
}
-Status ReplicationCoordinatorExternalStateMock::storeLocalConfigDocument(OperationContext* txn,
+Status ReplicationCoordinatorExternalStateMock::storeLocalConfigDocument(OperationContext* opCtx,
const BSONObj& config) {
{
stdx::unique_lock<stdx::mutex> lock(_shouldHangConfigMutex);
@@ -153,12 +153,12 @@ void ReplicationCoordinatorExternalStateMock::setLocalConfigDocument(
}
StatusWith<LastVote> ReplicationCoordinatorExternalStateMock::loadLocalLastVoteDocument(
- OperationContext* txn) {
+ OperationContext* opCtx) {
return _localRsLastVoteDocument;
}
Status ReplicationCoordinatorExternalStateMock::storeLocalLastVoteDocument(
- OperationContext* txn, const LastVote& lastVote) {
+ OperationContext* opCtx, const LastVote& lastVote) {
{
stdx::unique_lock<stdx::mutex> lock(_shouldHangLastVoteMutex);
while (_storeLocalLastVoteDocumentShouldHang) {
@@ -180,9 +180,10 @@ void ReplicationCoordinatorExternalStateMock::setLocalLastVoteDocument(
void ReplicationCoordinatorExternalStateMock::setGlobalTimestamp(ServiceContext* service,
const Timestamp& newTime) {}
-void ReplicationCoordinatorExternalStateMock::cleanUpLastApplyBatch(OperationContext* txn) {}
+void ReplicationCoordinatorExternalStateMock::cleanUpLastApplyBatch(OperationContext* opCtx) {}
-StatusWith<OpTime> ReplicationCoordinatorExternalStateMock::loadLastOpTime(OperationContext* txn) {
+StatusWith<OpTime> ReplicationCoordinatorExternalStateMock::loadLastOpTime(
+ OperationContext* opCtx) {
return _lastOpTime;
}
@@ -222,7 +223,7 @@ void ReplicationCoordinatorExternalStateMock::closeConnections() {
_connectionsClosed = true;
}
-void ReplicationCoordinatorExternalStateMock::killAllUserOperations(OperationContext* txn) {}
+void ReplicationCoordinatorExternalStateMock::killAllUserOperations(OperationContext* opCtx) {}
void ReplicationCoordinatorExternalStateMock::shardingOnStepDownHook() {}
@@ -237,7 +238,7 @@ void ReplicationCoordinatorExternalStateMock::dropAllSnapshots() {}
void ReplicationCoordinatorExternalStateMock::updateCommittedSnapshot(SnapshotName newCommitPoint) {
}
-void ReplicationCoordinatorExternalStateMock::createSnapshot(OperationContext* txn,
+void ReplicationCoordinatorExternalStateMock::createSnapshot(OperationContext* opCtx,
SnapshotName name) {}
void ReplicationCoordinatorExternalStateMock::forceSnapshotCreation() {}
@@ -257,7 +258,7 @@ double ReplicationCoordinatorExternalStateMock::getElectionTimeoutOffsetLimitFra
}
bool ReplicationCoordinatorExternalStateMock::isReadCommittedSupportedByStorageEngine(
- OperationContext* txn) const {
+ OperationContext* opCtx) const {
return _isReadCommittedSupported;
}
@@ -276,12 +277,12 @@ Status ReplicationCoordinatorExternalStateMock::multiInitialSyncApply(
}
std::unique_ptr<OplogBuffer> ReplicationCoordinatorExternalStateMock::makeInitialSyncOplogBuffer(
- OperationContext* txn) const {
+ OperationContext* opCtx) const {
return stdx::make_unique<OplogBufferBlockingQueue>();
}
std::unique_ptr<OplogBuffer> ReplicationCoordinatorExternalStateMock::makeSteadyStateOplogBuffer(
- OperationContext* txn) const {
+ OperationContext* opCtx) const {
return stdx::make_unique<OplogBufferBlockingQueue>();
}
@@ -297,9 +298,9 @@ void ReplicationCoordinatorExternalStateMock::setIsReadCommittedEnabled(bool val
_isReadCommittedSupported = val;
}
-void ReplicationCoordinatorExternalStateMock::onDrainComplete(OperationContext* txn) {}
+void ReplicationCoordinatorExternalStateMock::onDrainComplete(OperationContext* opCtx) {}
-OpTime ReplicationCoordinatorExternalStateMock::onTransitionToPrimary(OperationContext* txn,
+OpTime ReplicationCoordinatorExternalStateMock::onTransitionToPrimary(OperationContext* opCtx,
bool isV1ElectionProtocol) {
if (isV1ElectionProtocol) {
_lastOpTime = OpTime(Timestamp(1, 0), 1);
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_mock.h b/src/mongo/db/repl/replication_coordinator_external_state_mock.h
index 1b575ede697..c33044a2510 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_mock.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state_mock.h
@@ -58,46 +58,46 @@ public:
virtual ~ReplicationCoordinatorExternalStateMock();
virtual void startThreads(const ReplSettings& settings) override;
virtual void startInitialSync(OnInitialSyncFinishedFn finished) override;
- virtual void startSteadyStateReplication(OperationContext* txn,
+ virtual void startSteadyStateReplication(OperationContext* opCtx,
ReplicationCoordinator* replCoord) override;
- virtual void stopDataReplication(OperationContext* txn) override;
- virtual void runOnInitialSyncThread(stdx::function<void(OperationContext* txn)> run) override;
- virtual bool isInitialSyncFlagSet(OperationContext* txn) override;
+ virtual void stopDataReplication(OperationContext* opCtx) override;
+ virtual void runOnInitialSyncThread(stdx::function<void(OperationContext* opCtx)> run) override;
+ virtual bool isInitialSyncFlagSet(OperationContext* opCtx) override;
virtual void startMasterSlave(OperationContext*);
- virtual void shutdown(OperationContext* txn);
+ virtual void shutdown(OperationContext* opCtx);
virtual executor::TaskExecutor* getTaskExecutor() const override;
virtual OldThreadPool* getDbWorkThreadPool() const override;
- virtual Status runRepairOnLocalDB(OperationContext* txn) override;
- virtual Status initializeReplSetStorage(OperationContext* txn, const BSONObj& config);
- void onDrainComplete(OperationContext* txn) override;
- OpTime onTransitionToPrimary(OperationContext* txn, bool isV1ElectionProtocol) override;
+ virtual Status runRepairOnLocalDB(OperationContext* opCtx) override;
+ virtual Status initializeReplSetStorage(OperationContext* opCtx, const BSONObj& config);
+ void onDrainComplete(OperationContext* opCtx) override;
+ OpTime onTransitionToPrimary(OperationContext* opCtx, bool isV1ElectionProtocol) override;
virtual void forwardSlaveProgress();
virtual OID ensureMe(OperationContext*);
virtual bool isSelf(const HostAndPort& host, ServiceContext* service);
- virtual HostAndPort getClientHostAndPort(const OperationContext* txn);
- virtual StatusWith<BSONObj> loadLocalConfigDocument(OperationContext* txn);
- virtual Status storeLocalConfigDocument(OperationContext* txn, const BSONObj& config);
- virtual StatusWith<LastVote> loadLocalLastVoteDocument(OperationContext* txn);
- virtual Status storeLocalLastVoteDocument(OperationContext* txn, const LastVote& lastVote);
+ virtual HostAndPort getClientHostAndPort(const OperationContext* opCtx);
+ virtual StatusWith<BSONObj> loadLocalConfigDocument(OperationContext* opCtx);
+ virtual Status storeLocalConfigDocument(OperationContext* opCtx, const BSONObj& config);
+ virtual StatusWith<LastVote> loadLocalLastVoteDocument(OperationContext* opCtx);
+ virtual Status storeLocalLastVoteDocument(OperationContext* opCtx, const LastVote& lastVote);
virtual void setGlobalTimestamp(ServiceContext* service, const Timestamp& newTime);
- virtual StatusWith<OpTime> loadLastOpTime(OperationContext* txn);
- virtual void cleanUpLastApplyBatch(OperationContext* txn);
+ virtual StatusWith<OpTime> loadLastOpTime(OperationContext* opCtx);
+ virtual void cleanUpLastApplyBatch(OperationContext* opCtx);
virtual void closeConnections();
- virtual void killAllUserOperations(OperationContext* txn);
+ virtual void killAllUserOperations(OperationContext* opCtx);
virtual void shardingOnStepDownHook();
virtual void signalApplierToChooseNewSyncSource();
virtual void stopProducer();
virtual void startProducerIfStopped();
virtual void dropAllSnapshots();
virtual void updateCommittedSnapshot(SnapshotName newCommitPoint);
- virtual void createSnapshot(OperationContext* txn, SnapshotName name);
+ virtual void createSnapshot(OperationContext* opCtx, SnapshotName name);
virtual void forceSnapshotCreation();
virtual bool snapshotsEnabled() const;
virtual void notifyOplogMetadataWaiters();
virtual double getElectionTimeoutOffsetLimitFraction() const;
- virtual bool isReadCommittedSupportedByStorageEngine(OperationContext* txn) const;
- virtual StatusWith<OpTime> multiApply(OperationContext* txn,
+ virtual bool isReadCommittedSupportedByStorageEngine(OperationContext* opCtx) const;
+ virtual StatusWith<OpTime> multiApply(OperationContext* opCtx,
MultiApplier::Operations ops,
MultiApplier::ApplyOperationFn applyOperation) override;
virtual Status multiSyncApply(MultiApplier::OperationPtrs* ops) override;
@@ -105,9 +105,9 @@ public:
const HostAndPort& source,
AtomicUInt32* fetchCount) override;
virtual std::unique_ptr<OplogBuffer> makeInitialSyncOplogBuffer(
- OperationContext* txn) const override;
+ OperationContext* opCtx) const override;
virtual std::unique_ptr<OplogBuffer> makeSteadyStateOplogBuffer(
- OperationContext* txn) const override;
+ OperationContext* opCtx) const override;
virtual bool shouldUseDataReplicatorInitialSync() const override;
virtual std::size_t getOplogFetcherMaxFetcherRestarts() const override;
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 8ad99d7e24a..a238cee74de 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -411,8 +411,8 @@ void ReplicationCoordinatorImpl::appendConnectionStats(executor::ConnectionPoolS
_replExecutor.appendConnectionStats(stats);
}
-bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* txn) {
- StatusWith<LastVote> lastVote = _externalState->loadLocalLastVoteDocument(txn);
+bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* opCtx) {
+ StatusWith<LastVote> lastVote = _externalState->loadLocalLastVoteDocument(opCtx);
if (!lastVote.isOK()) {
if (lastVote.getStatus() == ErrorCodes::NoMatchingDocument) {
log() << "Did not find local voted for document at startup.";
@@ -426,7 +426,7 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* txn) {
_topCoord->loadLastVote(lastVote.getValue());
}
- StatusWith<BSONObj> cfg = _externalState->loadLocalConfigDocument(txn);
+ StatusWith<BSONObj> cfg = _externalState->loadLocalConfigDocument(opCtx);
if (!cfg.isOK()) {
log() << "Did not find local replica set configuration document at startup; "
<< cfg.getStatus();
@@ -443,8 +443,8 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* txn) {
}
// Read the last op from the oplog after cleaning up any partially applied batches.
- _externalState->cleanUpLastApplyBatch(txn);
- auto lastOpTimeStatus = _externalState->loadLastOpTime(txn);
+ _externalState->cleanUpLastApplyBatch(opCtx);
+ auto lastOpTimeStatus = _externalState->loadLastOpTime(opCtx);
// Use a callback here, because _finishLoadLocalConfig calls isself() which requires
// that the server's networking layer be up and running and accepting connections, which
@@ -546,12 +546,12 @@ void ReplicationCoordinatorImpl::_finishLoadLocalConfig(
_performPostMemberStateUpdateAction(action);
if (!isArbiter) {
_externalState->startThreads(_settings);
- invariant(cbData.txn);
- _startDataReplication(cbData.txn);
+ invariant(cbData.opCtx);
+ _startDataReplication(cbData.opCtx);
}
}
-void ReplicationCoordinatorImpl::_stopDataReplication(OperationContext* txn) {
+void ReplicationCoordinatorImpl::_stopDataReplication(OperationContext* opCtx) {
std::shared_ptr<DataReplicator> drCopy;
{
LockGuard lk(_mutex);
@@ -569,19 +569,20 @@ void ReplicationCoordinatorImpl::_stopDataReplication(OperationContext* txn) {
}
LOG(1) << "ReplicationCoordinatorImpl::_stopDataReplication calling "
"ReplCoordExtState::stopDataReplication.";
- _externalState->stopDataReplication(txn);
+ _externalState->stopDataReplication(opCtx);
}
-void ReplicationCoordinatorImpl::_startDataReplication(OperationContext* txn,
+void ReplicationCoordinatorImpl::_startDataReplication(OperationContext* opCtx,
stdx::function<void()> startCompleted) {
// Check to see if we need to do an initial sync.
const auto lastOpTime = getMyLastAppliedOpTime();
- const auto needsInitialSync = lastOpTime.isNull() || _externalState->isInitialSyncFlagSet(txn);
+ const auto needsInitialSync =
+ lastOpTime.isNull() || _externalState->isInitialSyncFlagSet(opCtx);
if (!needsInitialSync) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
if (!_inShutdown) {
// Start steady replication, since we already have data.
- _externalState->startSteadyStateReplication(txn, this);
+ _externalState->startSteadyStateReplication(opCtx, this);
}
return;
}
@@ -624,9 +625,9 @@ void ReplicationCoordinatorImpl::_startDataReplication(OperationContext* txn,
startCompleted();
}
// Repair local db (to compact it).
- auto txn = cc().makeOperationContext();
- uassertStatusOK(_externalState->runRepairOnLocalDB(txn.get()));
- _externalState->startSteadyStateReplication(txn.get(), this);
+ auto opCtx = cc().makeOperationContext();
+ uassertStatusOK(_externalState->runRepairOnLocalDB(opCtx.get()));
+ _externalState->startSteadyStateReplication(opCtx.get(), this);
};
std::shared_ptr<DataReplicator> drCopy;
@@ -644,7 +645,7 @@ void ReplicationCoordinatorImpl::_startDataReplication(OperationContext* txn,
}
// DataReplicator::startup() must be called outside lock because it uses features (eg.
// setting the initial sync flag) which depend on the ReplicationCoordinatorImpl.
- uassertStatusOK(drCopy->startup(txn, numInitialSyncAttempts.load()));
+ uassertStatusOK(drCopy->startup(opCtx, numInitialSyncAttempts.load()));
} catch (...) {
auto status = exceptionToStatus();
log() << "Initial Sync failed to start: " << status;
@@ -655,19 +656,19 @@ void ReplicationCoordinatorImpl::_startDataReplication(OperationContext* txn,
fassertFailedWithStatusNoTrace(40354, status);
}
} else {
- _externalState->startInitialSync([this, startCompleted](OperationContext* txn) {
+ _externalState->startInitialSync([this, startCompleted](OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
if (!_inShutdown) {
if (startCompleted) {
startCompleted();
}
- _externalState->startSteadyStateReplication(txn, this);
+ _externalState->startSteadyStateReplication(opCtx, this);
}
});
}
}
-void ReplicationCoordinatorImpl::startup(OperationContext* txn) {
+void ReplicationCoordinatorImpl::startup(OperationContext* opCtx) {
if (!isReplEnabled()) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
_setConfigState_inlock(kConfigReplicationDisabled);
@@ -675,7 +676,7 @@ void ReplicationCoordinatorImpl::startup(OperationContext* txn) {
}
{
- OID rid = _externalState->ensureMe(txn);
+ OID rid = _externalState->ensureMe(opCtx);
stdx::lock_guard<stdx::mutex> lk(_mutex);
fassert(18822, !_inShutdown);
@@ -687,16 +688,16 @@ void ReplicationCoordinatorImpl::startup(OperationContext* txn) {
if (!_settings.usingReplSets()) {
// Must be Master/Slave
invariant(_settings.isMaster() || _settings.isSlave());
- _externalState->startMasterSlave(txn);
+ _externalState->startMasterSlave(opCtx);
return;
}
_replExecutor.startup();
_topCoord->setStorageEngineSupportsReadCommitted(
- _externalState->isReadCommittedSupportedByStorageEngine(txn));
+ _externalState->isReadCommittedSupportedByStorageEngine(opCtx));
- bool doneLoadingConfig = _startLoadLocalConfig(txn);
+ bool doneLoadingConfig = _startLoadLocalConfig(opCtx);
if (doneLoadingConfig) {
// If we're not done loading the config, then the config state will be set by
// _finishLoadLocalConfig.
@@ -706,7 +707,7 @@ void ReplicationCoordinatorImpl::startup(OperationContext* txn) {
}
}
-void ReplicationCoordinatorImpl::shutdown(OperationContext* txn) {
+void ReplicationCoordinatorImpl::shutdown(OperationContext* opCtx) {
// Shutdown must:
// * prevent new threads from blocking in awaitReplication
// * wake up all existing threads blocking in awaitReplication
@@ -759,7 +760,7 @@ void ReplicationCoordinatorImpl::shutdown(OperationContext* txn) {
drCopy->join();
drCopy.reset();
}
- _externalState->shutdown(txn);
+ _externalState->shutdown(opCtx);
_replExecutor.shutdown();
_replExecutor.join();
}
@@ -892,7 +893,7 @@ ReplicationCoordinator::ApplierState ReplicationCoordinatorImpl::getApplierState
return _applierState;
}
-void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* txn,
+void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* opCtx,
long long termWhenBufferIsEmpty) {
// This logic is a little complicated in order to avoid acquiring the global exclusive lock
// unnecessarily. This is important because the applier may call signalDrainComplete()
@@ -918,7 +919,7 @@ void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* txn,
// temp collection isn't introduced on the new primary before we drop all the temp collections.
// When we go to drop all temp collections, we must replicate the drops.
- invariant(txn->writesAreReplicated());
+ invariant(opCtx->writesAreReplicated());
stdx::unique_lock<stdx::mutex> lk(_mutex);
if (_applierState != ApplierState::Draining) {
@@ -926,7 +927,7 @@ void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* txn,
}
lk.unlock();
- _externalState->onDrainComplete(txn);
+ _externalState->onDrainComplete(opCtx);
if (MONGO_FAIL_POINT(transitionToPrimaryHangBeforeTakingGlobalExclusiveLock)) {
log() << "transition to primary - "
@@ -943,8 +944,8 @@ void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* txn,
}
}
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite globalWriteLock(txn->lockState());
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite globalWriteLock(opCtx->lockState());
lk.lock();
// Exit drain mode when the buffer is empty in the current term and we're in Draining mode.
@@ -959,7 +960,7 @@ void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* txn,
_canAcceptNonLocalWrites = true;
lk.unlock();
- _setFirstOpTimeOfMyTerm(_externalState->onTransitionToPrimary(txn, isV1ElectionProtocol()));
+ _setFirstOpTimeOfMyTerm(_externalState->onTransitionToPrimary(opCtx, isV1ElectionProtocol()));
lk.lock();
// Must calculate the commit level again because firstOpTimeOfMyTerm wasn't set when we logged
@@ -1232,11 +1233,11 @@ OpTime ReplicationCoordinatorImpl::getMyLastDurableOpTime() const {
return _getMyLastDurableOpTime_inlock();
}
-Status ReplicationCoordinatorImpl::waitUntilOpTimeForRead(OperationContext* txn,
+Status ReplicationCoordinatorImpl::waitUntilOpTimeForRead(OperationContext* opCtx,
const ReadConcernArgs& settings) {
// We should never wait for replication if we are holding any locks, because this can
// potentially block for long time while doing network activity.
- if (txn->lockState()->isLocked()) {
+ if (opCtx->lockState()->isLocked()) {
return {ErrorCodes::IllegalOperation,
"Waiting for replication not allowed while holding a lock"};
}
@@ -1291,10 +1292,10 @@ Status ReplicationCoordinatorImpl::waitUntilOpTimeForRead(OperationContext* txn,
// If we are doing a majority read concern we only need to wait for a new snapshot.
if (isMajorityReadConcern) {
// Wait for a snapshot that meets our needs (< targetOpTime).
- LOG(3) << "waitUntilOpTime: waiting for a new snapshot until " << txn->getDeadline();
+ LOG(3) << "waitUntilOpTime: waiting for a new snapshot until " << opCtx->getDeadline();
auto waitStatus =
- txn->waitForConditionOrInterruptNoAssert(_currentCommittedSnapshotCond, lock);
+ opCtx->waitForConditionOrInterruptNoAssert(_currentCommittedSnapshotCond, lock);
if (!waitStatus.isOK()) {
return waitStatus;
}
@@ -1305,12 +1306,12 @@ Status ReplicationCoordinatorImpl::waitUntilOpTimeForRead(OperationContext* txn,
// We just need to wait for the opTime to catch up to what we need (not majority RC).
stdx::condition_variable condVar;
WaiterInfoGuard waitInfo(
- &_opTimeWaiterList, txn->getOpID(), targetOpTime, nullptr, &condVar);
+ &_opTimeWaiterList, opCtx->getOpID(), targetOpTime, nullptr, &condVar);
LOG(3) << "waituntilOpTime: waiting for OpTime " << waitInfo.waiter << " until "
- << txn->getDeadline();
+ << opCtx->getDeadline();
- auto waitStatus = txn->waitForConditionOrInterruptNoAssert(condVar, lock);
+ auto waitStatus = opCtx->waitForConditionOrInterruptNoAssert(condVar, lock);
if (!waitStatus.isOK()) {
return waitStatus;
}
@@ -1591,37 +1592,37 @@ bool ReplicationCoordinatorImpl::_haveTaggedNodesReachedOpTime_inlock(
}
ReplicationCoordinator::StatusAndDuration ReplicationCoordinatorImpl::awaitReplication(
- OperationContext* txn, const OpTime& opTime, const WriteConcernOptions& writeConcern) {
+ OperationContext* opCtx, const OpTime& opTime, const WriteConcernOptions& writeConcern) {
Timer timer;
WriteConcernOptions fixedWriteConcern = populateUnsetWriteConcernOptionsSyncMode(writeConcern);
stdx::unique_lock<stdx::mutex> lock(_mutex);
auto status =
- _awaitReplication_inlock(&lock, txn, opTime, SnapshotName::min(), fixedWriteConcern);
+ _awaitReplication_inlock(&lock, opCtx, opTime, SnapshotName::min(), fixedWriteConcern);
return {std::move(status), duration_cast<Milliseconds>(timer.elapsed())};
}
ReplicationCoordinator::StatusAndDuration
ReplicationCoordinatorImpl::awaitReplicationOfLastOpForClient(
- OperationContext* txn, const WriteConcernOptions& writeConcern) {
+ OperationContext* opCtx, const WriteConcernOptions& writeConcern) {
Timer timer;
WriteConcernOptions fixedWriteConcern = populateUnsetWriteConcernOptionsSyncMode(writeConcern);
stdx::unique_lock<stdx::mutex> lock(_mutex);
- const auto& clientInfo = ReplClientInfo::forClient(txn->getClient());
+ const auto& clientInfo = ReplClientInfo::forClient(opCtx->getClient());
auto status = _awaitReplication_inlock(
- &lock, txn, clientInfo.getLastOp(), clientInfo.getLastSnapshot(), fixedWriteConcern);
+ &lock, opCtx, clientInfo.getLastOp(), clientInfo.getLastSnapshot(), fixedWriteConcern);
return {std::move(status), duration_cast<Milliseconds>(timer.elapsed())};
}
Status ReplicationCoordinatorImpl::_awaitReplication_inlock(
stdx::unique_lock<stdx::mutex>* lock,
- OperationContext* txn,
+ OperationContext* opCtx,
const OpTime& opTime,
SnapshotName minSnapshot,
const WriteConcernOptions& writeConcern) {
// We should never wait for replication if we are holding any locks, because this can
// potentially block for long time while doing network activity.
- if (txn->lockState()->isLocked()) {
+ if (opCtx->lockState()->isLocked()) {
return {ErrorCodes::IllegalOperation,
"Waiting for replication not allowed while holding a lock"};
}
@@ -1668,7 +1669,7 @@ Status ReplicationCoordinatorImpl::_awaitReplication_inlock(
return stepdownStatus;
}
- auto interruptStatus = txn->checkForInterruptNoAssert();
+ auto interruptStatus = opCtx->checkForInterruptNoAssert();
if (!interruptStatus.isOK()) {
return interruptStatus;
}
@@ -1681,7 +1682,7 @@ Status ReplicationCoordinatorImpl::_awaitReplication_inlock(
}
}
- auto clockSource = txn->getServiceContext()->getFastClockSource();
+ auto clockSource = opCtx->getServiceContext()->getFastClockSource();
const auto wTimeoutDate = [&]() -> const Date_t {
if (writeConcern.wDeadline != Date_t::max()) {
return writeConcern.wDeadline;
@@ -1696,14 +1697,14 @@ Status ReplicationCoordinatorImpl::_awaitReplication_inlock(
// Must hold _mutex before constructing waitInfo as it will modify _replicationWaiterList
stdx::condition_variable condVar;
WaiterInfoGuard waitInfo(
- &_replicationWaiterList, txn->getOpID(), opTime, &writeConcern, &condVar);
+ &_replicationWaiterList, opCtx->getOpID(), opTime, &writeConcern, &condVar);
while (!_doneWaitingForReplication_inlock(opTime, minSnapshot, writeConcern)) {
if (_inShutdown) {
return {ErrorCodes::ShutdownInProgress, "Replication is being shut down"};
}
- auto status = txn->waitForConditionOrInterruptNoAssertUntil(condVar, *lock, wTimeoutDate);
+ auto status = opCtx->waitForConditionOrInterruptNoAssertUntil(condVar, *lock, wTimeoutDate);
if (!status.isOK()) {
return status.getStatus();
}
@@ -1729,7 +1730,7 @@ Status ReplicationCoordinatorImpl::_awaitReplication_inlock(
return _checkIfWriteConcernCanBeSatisfied_inlock(writeConcern);
}
-Status ReplicationCoordinatorImpl::stepDown(OperationContext* txn,
+Status ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
const bool force,
const Milliseconds& waitTime,
const Milliseconds& stepdownTime) {
@@ -1745,12 +1746,12 @@ Status ReplicationCoordinatorImpl::stepDown(OperationContext* txn,
return {ErrorCodes::NotMaster, "not primary so can't step down"};
}
- Lock::GlobalLock globalReadLock(txn->lockState(), MODE_S, Lock::GlobalLock::EnqueueOnly());
+ Lock::GlobalLock globalReadLock(opCtx->lockState(), MODE_S, Lock::GlobalLock::EnqueueOnly());
// We've requested the global shared lock which will stop new writes from coming in,
// but existing writes could take a long time to finish, so kill all user operations
// to help us get the global lock faster.
- _externalState->killAllUserOperations(txn);
+ _externalState->killAllUserOperations(opCtx);
globalReadLock.waitForLock(durationCount<Milliseconds>(stepdownTime));
@@ -1763,7 +1764,7 @@ Status ReplicationCoordinatorImpl::stepDown(OperationContext* txn,
try {
stdx::unique_lock<stdx::mutex> topoLock(_topoMutex);
bool restartHeartbeats = true;
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
while (!_tryToStepDown(waitUntil, stepDownUntil, force)) {
if (restartHeartbeats) {
// We send out a fresh round of heartbeats because stepping down successfully
@@ -1773,7 +1774,7 @@ Status ReplicationCoordinatorImpl::stepDown(OperationContext* txn,
_restartHeartbeats_inlock();
restartHeartbeats = false;
}
- txn->waitForConditionOrInterruptUntil(
+ opCtx->waitForConditionOrInterruptUntil(
_stepDownWaiters, topoLock, std::min(stepDownUntil, waitUntil));
}
} catch (const DBException& ex) {
@@ -1864,14 +1865,14 @@ bool ReplicationCoordinatorImpl::isMasterForReportingPurposes() {
return false;
}
-bool ReplicationCoordinatorImpl::canAcceptWritesForDatabase(OperationContext* txn,
+bool ReplicationCoordinatorImpl::canAcceptWritesForDatabase(OperationContext* opCtx,
StringData dbName) {
// The answer isn't meaningful unless we hold the global lock.
- invariant(txn->lockState()->isLocked());
- return canAcceptWritesForDatabase_UNSAFE(txn, dbName);
+ invariant(opCtx->lockState()->isLocked());
+ return canAcceptWritesForDatabase_UNSAFE(opCtx, dbName);
}
-bool ReplicationCoordinatorImpl::canAcceptWritesForDatabase_UNSAFE(OperationContext* txn,
+bool ReplicationCoordinatorImpl::canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx,
StringData dbName) {
// _canAcceptNonLocalWrites is always true for standalone nodes, always false for nodes
// started with --slave, and adjusted based on primary+drain state in replica sets.
@@ -1889,32 +1890,32 @@ bool ReplicationCoordinatorImpl::canAcceptWritesForDatabase_UNSAFE(OperationCont
return !replAllDead && _settings.isMaster();
}
-bool ReplicationCoordinatorImpl::canAcceptWritesFor(OperationContext* txn,
+bool ReplicationCoordinatorImpl::canAcceptWritesFor(OperationContext* opCtx,
const NamespaceString& ns) {
- invariant(txn->lockState()->isLocked());
- return canAcceptWritesFor_UNSAFE(txn, ns);
+ invariant(opCtx->lockState()->isLocked());
+ return canAcceptWritesFor_UNSAFE(opCtx, ns);
}
-bool ReplicationCoordinatorImpl::canAcceptWritesFor_UNSAFE(OperationContext* txn,
+bool ReplicationCoordinatorImpl::canAcceptWritesFor_UNSAFE(OperationContext* opCtx,
const NamespaceString& ns) {
if (_memberState.rollback() && ns.isOplog()) {
return false;
}
StringData dbName = ns.db();
- return canAcceptWritesForDatabase_UNSAFE(txn, dbName);
+ return canAcceptWritesForDatabase_UNSAFE(opCtx, dbName);
}
-Status ReplicationCoordinatorImpl::checkCanServeReadsFor(OperationContext* txn,
+Status ReplicationCoordinatorImpl::checkCanServeReadsFor(OperationContext* opCtx,
const NamespaceString& ns,
bool slaveOk) {
- invariant(txn->lockState()->isLocked());
- return checkCanServeReadsFor_UNSAFE(txn, ns, slaveOk);
+ invariant(opCtx->lockState()->isLocked());
+ return checkCanServeReadsFor_UNSAFE(opCtx, ns, slaveOk);
}
-Status ReplicationCoordinatorImpl::checkCanServeReadsFor_UNSAFE(OperationContext* txn,
+Status ReplicationCoordinatorImpl::checkCanServeReadsFor_UNSAFE(OperationContext* opCtx,
const NamespaceString& ns,
bool slaveOk) {
- auto client = txn->getClient();
+ auto client = opCtx->getClient();
// Oplog reads are not allowed during STARTUP state, but we make an exception for internal
// reads and master-slave replication. Internel reads are required for cleaning up unfinished
// apply batches. Master-slave never sets the state so we make an exception for it as well.
@@ -1928,7 +1929,7 @@ Status ReplicationCoordinatorImpl::checkCanServeReadsFor_UNSAFE(OperationContext
if (client->isInDirectClient()) {
return Status::OK();
}
- if (canAcceptWritesFor_UNSAFE(txn, ns)) {
+ if (canAcceptWritesFor_UNSAFE(opCtx, ns)) {
return Status::OK();
}
if (_settings.isSlave() || _settings.isMaster()) {
@@ -1948,9 +1949,9 @@ bool ReplicationCoordinatorImpl::isInPrimaryOrSecondaryState() const {
return _canServeNonLocalReads.loadRelaxed();
}
-bool ReplicationCoordinatorImpl::shouldRelaxIndexConstraints(OperationContext* txn,
+bool ReplicationCoordinatorImpl::shouldRelaxIndexConstraints(OperationContext* opCtx,
const NamespaceString& ns) {
- return !canAcceptWritesFor(txn, ns);
+ return !canAcceptWritesFor(opCtx, ns);
}
OID ReplicationCoordinatorImpl::getElectionId() {
@@ -1977,8 +1978,8 @@ int ReplicationCoordinatorImpl::_getMyId_inlock() const {
return self.getId();
}
-Status ReplicationCoordinatorImpl::resyncData(OperationContext* txn, bool waitUntilCompleted) {
- _stopDataReplication(txn);
+Status ReplicationCoordinatorImpl::resyncData(OperationContext* opCtx, bool waitUntilCompleted) {
+ _stopDataReplication(opCtx);
auto finishedEvent = uassertStatusOK(_replExecutor.makeEvent());
stdx::function<void()> f;
if (waitUntilCompleted)
@@ -1987,7 +1988,7 @@ Status ReplicationCoordinatorImpl::resyncData(OperationContext* txn, bool waitUn
stdx::unique_lock<stdx::mutex> lk(_mutex);
_resetMyLastOpTimes_inlock();
lk.unlock(); // unlock before calling into replCoordExtState.
- _startDataReplication(txn, f);
+ _startDataReplication(opCtx, f);
if (waitUntilCompleted) {
_replExecutor.waitForEvent(finishedEvent);
}
@@ -2212,7 +2213,7 @@ Status ReplicationCoordinatorImpl::setMaintenanceMode(bool activate) {
return Status::OK();
}
-Status ReplicationCoordinatorImpl::processReplSetSyncFrom(OperationContext* txn,
+Status ReplicationCoordinatorImpl::processReplSetSyncFrom(OperationContext* opCtx,
const HostAndPort& target,
BSONObjBuilder* resultObj) {
Status result(ErrorCodes::InternalError, "didn't set status in prepareSyncFromResponse");
@@ -2227,7 +2228,7 @@ Status ReplicationCoordinatorImpl::processReplSetSyncFrom(OperationContext* txn,
}
if (doResync) {
- return resyncData(txn, false);
+ return resyncData(opCtx, false);
}
return result;
@@ -2292,7 +2293,7 @@ Status ReplicationCoordinatorImpl::processHeartbeat(const ReplSetHeartbeatArgs&
return result;
}
-Status ReplicationCoordinatorImpl::processReplSetReconfig(OperationContext* txn,
+Status ReplicationCoordinatorImpl::processReplSetReconfig(OperationContext* opCtx,
const ReplSetReconfigArgs& args,
BSONObjBuilder* resultObj) {
log() << "replSetReconfig admin command received from client";
@@ -2363,7 +2364,7 @@ Status ReplicationCoordinatorImpl::processReplSetReconfig(OperationContext* txn,
}
StatusWith<int> myIndex = validateConfigForReconfig(
- _externalState.get(), oldConfig, newConfig, txn->getServiceContext(), args.force);
+ _externalState.get(), oldConfig, newConfig, opCtx->getServiceContext(), args.force);
if (!myIndex.isOK()) {
error() << "replSetReconfig got " << myIndex.getStatus() << " while validating "
<< newConfigObj;
@@ -2382,7 +2383,7 @@ Status ReplicationCoordinatorImpl::processReplSetReconfig(OperationContext* txn,
}
}
- status = _externalState->storeLocalConfigDocument(txn, newConfig.toBSON());
+ status = _externalState->storeLocalConfigDocument(opCtx, newConfig.toBSON());
if (!status.isOK()) {
error() << "replSetReconfig failed to store config document; " << status;
return status;
@@ -2465,7 +2466,7 @@ void ReplicationCoordinatorImpl::_finishReplSetReconfig(
}
}
-Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* txn,
+Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* opCtx,
const BSONObj& configObj,
BSONObjBuilder* resultObj) {
log() << "replSetInitiate admin command received from client";
@@ -2508,7 +2509,7 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* txn,
}
StatusWith<int> myIndex =
- validateConfigForInitiate(_externalState.get(), newConfig, txn->getServiceContext());
+ validateConfigForInitiate(_externalState.get(), newConfig, opCtx->getServiceContext());
if (!myIndex.isOK()) {
error() << "replSet initiate got " << myIndex.getStatus() << " while validating "
<< configObj;
@@ -2525,7 +2526,7 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* txn,
return status;
}
- status = _externalState->initializeReplSetStorage(txn, newConfig.toBSON());
+ status = _externalState->initializeReplSetStorage(opCtx, newConfig.toBSON());
if (!status.isOK()) {
error() << "replSetInitiate failed to store config document or create the oplog; "
<< status;
@@ -2545,7 +2546,7 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* txn,
// will fail validation with a "replSet initiate got ... while validating" reason.
invariant(!newConfig.getMemberAt(myIndex.getValue()).isArbiter());
_externalState->startThreads(_settings);
- _startDataReplication(txn);
+ _startDataReplication(opCtx);
configStateGuard.Dismiss();
return Status::OK();
@@ -2949,7 +2950,7 @@ Status ReplicationCoordinatorImpl::processReplSetUpdatePosition(const UpdatePosi
return status;
}
-Status ReplicationCoordinatorImpl::processHandshake(OperationContext* txn,
+Status ReplicationCoordinatorImpl::processHandshake(OperationContext* opCtx,
const HandshakeArgs& handshake) {
LOG(2) << "Received handshake " << handshake.toBSON();
@@ -2968,7 +2969,7 @@ Status ReplicationCoordinatorImpl::processHandshake(OperationContext* txn,
SlaveInfo newSlaveInfo;
newSlaveInfo.rid = handshake.getRid();
newSlaveInfo.memberId = -1;
- newSlaveInfo.hostAndPort = _externalState->getClientHostAndPort(txn);
+ newSlaveInfo.hostAndPort = _externalState->getClientHostAndPort(opCtx);
// Don't call _addSlaveInfo_inlock as that would wake sleepers unnecessarily.
_slaveInfo.push_back(newSlaveInfo);
@@ -3121,8 +3122,8 @@ void ReplicationCoordinatorImpl::blacklistSyncSource(const HostAndPort& host, Da
host));
}
-void ReplicationCoordinatorImpl::resetLastOpTimesFromOplog(OperationContext* txn) {
- StatusWith<OpTime> lastOpTimeStatus = _externalState->loadLastOpTime(txn);
+void ReplicationCoordinatorImpl::resetLastOpTimesFromOplog(OperationContext* opCtx) {
+ StatusWith<OpTime> lastOpTimeStatus = _externalState->loadLastOpTime(opCtx);
OpTime lastOpTime;
if (!lastOpTimeStatus.isOK()) {
warning() << "Failed to load timestamp of most recently applied operation; "
@@ -3137,7 +3138,7 @@ void ReplicationCoordinatorImpl::resetLastOpTimesFromOplog(OperationContext* txn
_reportUpstream_inlock(std::move(lock));
// Unlocked below.
- _externalState->setGlobalTimestamp(txn->getServiceContext(), lastOpTime.getTimestamp());
+ _externalState->setGlobalTimestamp(opCtx->getServiceContext(), lastOpTime.getTimestamp());
}
bool ReplicationCoordinatorImpl::shouldChangeSyncSource(
@@ -3244,14 +3245,14 @@ OpTime ReplicationCoordinatorImpl::getLastCommittedOpTime() const {
}
Status ReplicationCoordinatorImpl::processReplSetRequestVotes(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReplSetRequestVotesArgs& args,
ReplSetRequestVotesResponse* response) {
if (!isV1ElectionProtocol()) {
return {ErrorCodes::BadValue, "not using election protocol v1"};
}
- auto termStatus = updateTerm(txn, args.getTerm());
+ auto termStatus = updateTerm(opCtx, args.getTerm());
if (!termStatus.isOK() && termStatus.code() != ErrorCodes::StaleTerm)
return termStatus;
@@ -3264,7 +3265,7 @@ Status ReplicationCoordinatorImpl::processReplSetRequestVotes(
if (!args.isADryRun() && response->getVoteGranted()) {
LastVote lastVote{args.getTerm(), args.getCandidateIndex()};
- Status status = _externalState->storeLocalLastVoteDocument(txn, lastVote);
+ Status status = _externalState->storeLocalLastVoteDocument(opCtx, lastVote);
if (!status.isOK()) {
error() << "replSetRequestVotes failed to store LastVote document; " << status;
return status;
@@ -3405,7 +3406,7 @@ EventHandle ReplicationCoordinatorImpl::updateTerm_forTest(
return finishEvh;
}
-Status ReplicationCoordinatorImpl::updateTerm(OperationContext* txn, long long term) {
+Status ReplicationCoordinatorImpl::updateTerm(OperationContext* opCtx, long long term) {
// Term is only valid if we are replicating.
if (getReplicationMode() != modeReplSet) {
return {ErrorCodes::BadValue, "cannot supply 'term' without active replication"};
@@ -3417,7 +3418,7 @@ Status ReplicationCoordinatorImpl::updateTerm(OperationContext* txn, long long t
}
// Check we haven't acquired any lock, because potential stepdown needs global lock.
- dassert(!txn->lockState()->isLocked());
+ dassert(!opCtx->lockState()->isLocked());
TopologyCoordinator::UpdateTermResult updateTermResult;
EventHandle finishEvh;
@@ -3469,12 +3470,12 @@ EventHandle ReplicationCoordinatorImpl::_updateTerm_incallback(
return EventHandle();
}
-SnapshotName ReplicationCoordinatorImpl::reserveSnapshotName(OperationContext* txn) {
+SnapshotName ReplicationCoordinatorImpl::reserveSnapshotName(OperationContext* opCtx) {
auto reservedName = SnapshotName(_snapshotNameGenerator.addAndFetch(1));
dassert(reservedName > SnapshotName::min());
dassert(reservedName < SnapshotName::max());
- if (txn) {
- ReplClientInfo::forClient(txn->getClient()).setLastSnapshot(reservedName);
+ if (opCtx) {
+ ReplClientInfo::forClient(opCtx->getClient()).setLastSnapshot(reservedName);
}
return reservedName;
}
@@ -3483,12 +3484,12 @@ void ReplicationCoordinatorImpl::forceSnapshotCreation() {
_externalState->forceSnapshotCreation();
}
-void ReplicationCoordinatorImpl::waitUntilSnapshotCommitted(OperationContext* txn,
+void ReplicationCoordinatorImpl::waitUntilSnapshotCommitted(OperationContext* opCtx,
const SnapshotName& untilSnapshot) {
stdx::unique_lock<stdx::mutex> lock(_mutex);
while (!_currentCommittedSnapshot || _currentCommittedSnapshot->name < untilSnapshot) {
- txn->waitForConditionOrInterrupt(_currentCommittedSnapshotCond, lock);
+ opCtx->waitForConditionOrInterrupt(_currentCommittedSnapshotCond, lock);
}
}
@@ -3496,11 +3497,11 @@ size_t ReplicationCoordinatorImpl::getNumUncommittedSnapshots() {
return _uncommittedSnapshotsSize.load();
}
-void ReplicationCoordinatorImpl::createSnapshot(OperationContext* txn,
+void ReplicationCoordinatorImpl::createSnapshot(OperationContext* opCtx,
OpTime timeOfSnapshot,
SnapshotName name) {
stdx::lock_guard<stdx::mutex> lock(_mutex);
- _externalState->createSnapshot(txn, name);
+ _externalState->createSnapshot(opCtx, name);
auto snapshotInfo = SnapshotInfo{timeOfSnapshot, name};
if (timeOfSnapshot <= _lastCommittedOpTime) {
@@ -3588,10 +3589,10 @@ EventHandle ReplicationCoordinatorImpl::_resetElectionInfoOnProtocolVersionUpgra
if (cbData.status == ErrorCodes::CallbackCanceled) {
return;
}
- invariant(cbData.txn);
+ invariant(cbData.opCtx);
LastVote lastVote{OpTime::kInitialTerm, -1};
- auto status = _externalState->storeLocalLastVoteDocument(cbData.txn, lastVote);
+ auto status = _externalState->storeLocalLastVoteDocument(cbData.opCtx, lastVote);
invariant(status.isOK());
_replExecutor.signalEvent(evh);
});
diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h
index 5b2722eaab2..1fa82993ff6 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_impl.h
@@ -101,9 +101,9 @@ public:
// ================== Members of public ReplicationCoordinator API ===================
- virtual void startup(OperationContext* txn) override;
+ virtual void startup(OperationContext* opCtx) override;
- virtual void shutdown(OperationContext* txn) override;
+ virtual void shutdown(OperationContext* opCtx) override;
virtual ReplicationExecutor* getExecutor() override {
return &_replExecutor;
@@ -124,34 +124,34 @@ public:
virtual void clearSyncSourceBlacklist() override;
virtual ReplicationCoordinator::StatusAndDuration awaitReplication(
- OperationContext* txn, const OpTime& opTime, const WriteConcernOptions& writeConcern);
+ OperationContext* opCtx, const OpTime& opTime, const WriteConcernOptions& writeConcern);
virtual ReplicationCoordinator::StatusAndDuration awaitReplicationOfLastOpForClient(
- OperationContext* txn, const WriteConcernOptions& writeConcern);
+ OperationContext* opCtx, const WriteConcernOptions& writeConcern);
- virtual Status stepDown(OperationContext* txn,
+ virtual Status stepDown(OperationContext* opCtx,
bool force,
const Milliseconds& waitTime,
const Milliseconds& stepdownTime);
virtual bool isMasterForReportingPurposes();
- virtual bool canAcceptWritesForDatabase(OperationContext* txn, StringData dbName);
- virtual bool canAcceptWritesForDatabase_UNSAFE(OperationContext* txn, StringData dbName);
+ virtual bool canAcceptWritesForDatabase(OperationContext* opCtx, StringData dbName);
+ virtual bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, StringData dbName);
- bool canAcceptWritesFor(OperationContext* txn, const NamespaceString& ns) override;
- bool canAcceptWritesFor_UNSAFE(OperationContext* txn, const NamespaceString& ns) override;
+ bool canAcceptWritesFor(OperationContext* opCtx, const NamespaceString& ns) override;
+ bool canAcceptWritesFor_UNSAFE(OperationContext* opCtx, const NamespaceString& ns) override;
virtual Status checkIfWriteConcernCanBeSatisfied(const WriteConcernOptions& writeConcern) const;
- virtual Status checkCanServeReadsFor(OperationContext* txn,
+ virtual Status checkCanServeReadsFor(OperationContext* opCtx,
const NamespaceString& ns,
bool slaveOk);
- virtual Status checkCanServeReadsFor_UNSAFE(OperationContext* txn,
+ virtual Status checkCanServeReadsFor_UNSAFE(OperationContext* opCtx,
const NamespaceString& ns,
bool slaveOk);
- virtual bool shouldRelaxIndexConstraints(OperationContext* txn, const NamespaceString& ns);
+ virtual bool shouldRelaxIndexConstraints(OperationContext* opCtx, const NamespaceString& ns);
virtual Status setLastOptimeForSlave(const OID& rid, const Timestamp& ts);
@@ -168,7 +168,7 @@ public:
virtual OpTime getMyLastAppliedOpTime() const override;
virtual OpTime getMyLastDurableOpTime() const override;
- virtual Status waitUntilOpTimeForRead(OperationContext* txn,
+ virtual Status waitUntilOpTimeForRead(OperationContext* opCtx,
const ReadConcernArgs& settings) override;
virtual OID getElectionId() override;
@@ -181,14 +181,14 @@ public:
virtual ApplierState getApplierState() override;
- virtual void signalDrainComplete(OperationContext* txn,
+ virtual void signalDrainComplete(OperationContext* opCtx,
long long termWhenBufferIsEmpty) override;
virtual Status waitForDrainFinish(Milliseconds timeout) override;
virtual void signalUpstreamUpdater() override;
- virtual Status resyncData(OperationContext* txn, bool waitUntilCompleted) override;
+ virtual Status resyncData(OperationContext* opCtx, bool waitUntilCompleted) override;
virtual StatusWith<BSONObj> prepareReplSetUpdatePositionCommand(
ReplSetUpdatePositionCommandStyle commandStyle) const override;
@@ -214,7 +214,7 @@ public:
virtual bool getMaintenanceMode() override;
- virtual Status processReplSetSyncFrom(OperationContext* txn,
+ virtual Status processReplSetSyncFrom(OperationContext* opCtx,
const HostAndPort& target,
BSONObjBuilder* resultObj) override;
@@ -223,11 +223,11 @@ public:
virtual Status processHeartbeat(const ReplSetHeartbeatArgs& args,
ReplSetHeartbeatResponse* response) override;
- virtual Status processReplSetReconfig(OperationContext* txn,
+ virtual Status processReplSetReconfig(OperationContext* opCtx,
const ReplSetReconfigArgs& args,
BSONObjBuilder* resultObj) override;
- virtual Status processReplSetInitiate(OperationContext* txn,
+ virtual Status processReplSetInitiate(OperationContext* opCtx,
const BSONObj& configObj,
BSONObjBuilder* resultObj) override;
@@ -246,7 +246,8 @@ public:
virtual Status processReplSetUpdatePosition(const UpdatePositionArgs& updates,
long long* configVersion) override;
- virtual Status processHandshake(OperationContext* txn, const HandshakeArgs& handshake) override;
+ virtual Status processHandshake(OperationContext* opCtx,
+ const HandshakeArgs& handshake) override;
virtual bool buildsIndexes() override;
@@ -265,7 +266,7 @@ public:
virtual void blacklistSyncSource(const HostAndPort& host, Date_t until) override;
- virtual void resetLastOpTimesFromOplog(OperationContext* txn) override;
+ virtual void resetLastOpTimesFromOplog(OperationContext* opCtx) override;
virtual bool shouldChangeSyncSource(
const HostAndPort& currentSource,
@@ -274,7 +275,7 @@ public:
virtual OpTime getLastCommittedOpTime() const override;
- virtual Status processReplSetRequestVotes(OperationContext* txn,
+ virtual Status processReplSetRequestVotes(OperationContext* opCtx,
const ReplSetRequestVotesArgs& args,
ReplSetRequestVotesResponse* response) override;
@@ -302,19 +303,19 @@ public:
return _service;
}
- virtual Status updateTerm(OperationContext* txn, long long term) override;
+ virtual Status updateTerm(OperationContext* opCtx, long long term) override;
- virtual SnapshotName reserveSnapshotName(OperationContext* txn) override;
+ virtual SnapshotName reserveSnapshotName(OperationContext* opCtx) override;
virtual void forceSnapshotCreation() override;
- virtual void createSnapshot(OperationContext* txn,
+ virtual void createSnapshot(OperationContext* opCtx,
OpTime timeOfSnapshot,
SnapshotName name) override;
virtual OpTime getCurrentCommittedSnapshotOpTime() const override;
- virtual void waitUntilSnapshotCommitted(OperationContext* txn,
+ virtual void waitUntilSnapshotCommitted(OperationContext* opCtx,
const SnapshotName& untilSnapshot) override;
virtual void appendConnectionStats(executor::ConnectionPoolStats* stats) const override;
@@ -622,7 +623,7 @@ private:
* operation timing to the caller.
*/
Status _awaitReplication_inlock(stdx::unique_lock<stdx::mutex>* lock,
- OperationContext* txn,
+ OperationContext* opCtx,
const OpTime& opTime,
SnapshotName minSnapshot,
const WriteConcernOptions& writeConcern);
@@ -793,7 +794,7 @@ private:
* config detected but more work is needed to set it as the local config (which will be
* handled by the callback to _finishLoadLocalConfig).
*/
- bool _startLoadLocalConfig(OperationContext* txn);
+ bool _startLoadLocalConfig(OperationContext* opCtx);
/**
* Callback that finishes the work started in _startLoadLocalConfig and sets _rsConfigState
@@ -807,13 +808,13 @@ private:
/**
* Start replicating data, and does an initial sync if needed first.
*/
- void _startDataReplication(OperationContext* txn,
+ void _startDataReplication(OperationContext* opCtx,
stdx::function<void()> startCompleted = nullptr);
/**
* Stops replicating data by stopping the applier, fetcher and such.
*/
- void _stopDataReplication(OperationContext* txn);
+ void _stopDataReplication(OperationContext* opCtx);
/**
* Finishes the work of processReplSetInitiate() while holding _topoMutex, in the event of
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
index 89325f7f424..5724fb43bf6 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
@@ -157,15 +157,15 @@ TEST_F(ReplCoordElectTest, ElectionSucceedsWhenNodeIsTheOnlyElectableNode) {
<< getReplCoord()->getMemberState().toString();
ASSERT(getReplCoord()->getApplierState() == ReplicationCoordinator::ApplierState::Draining);
- const auto txnPtr = makeOperationContext();
- auto& txn = *txnPtr;
+ const auto opCtxPtr = makeOperationContext();
+ auto& opCtx = *opCtxPtr;
// Since we're still in drain mode, expect that we report ismaster: false, issecondary:true.
IsMasterResponse imResponse;
getReplCoord()->fillIsMasterForReplSet(&imResponse);
ASSERT_FALSE(imResponse.isMaster()) << imResponse.toBSON().toString();
ASSERT_TRUE(imResponse.isSecondary()) << imResponse.toBSON().toString();
- getReplCoord()->signalDrainComplete(&txn, getReplCoord()->getTerm());
+ getReplCoord()->signalDrainComplete(&opCtx, getReplCoord()->getTerm());
getReplCoord()->fillIsMasterForReplSet(&imResponse);
ASSERT_TRUE(imResponse.isMaster()) << imResponse.toBSON().toString();
ASSERT_FALSE(imResponse.isSecondary()) << imResponse.toBSON().toString();
@@ -191,15 +191,15 @@ TEST_F(ReplCoordElectTest, ElectionSucceedsWhenNodeIsTheOnlyNode) {
<< getReplCoord()->getMemberState().toString();
ASSERT(getReplCoord()->getApplierState() == ReplicationCoordinator::ApplierState::Draining);
- const auto txnPtr = makeOperationContext();
- auto& txn = *txnPtr;
+ const auto opCtxPtr = makeOperationContext();
+ auto& opCtx = *opCtxPtr;
// Since we're still in drain mode, expect that we report ismaster: false, issecondary:true.
IsMasterResponse imResponse;
getReplCoord()->fillIsMasterForReplSet(&imResponse);
ASSERT_FALSE(imResponse.isMaster()) << imResponse.toBSON().toString();
ASSERT_TRUE(imResponse.isSecondary()) << imResponse.toBSON().toString();
- getReplCoord()->signalDrainComplete(&txn, getReplCoord()->getTerm());
+ getReplCoord()->signalDrainComplete(&opCtx, getReplCoord()->getTerm());
getReplCoord()->fillIsMasterForReplSet(&imResponse);
ASSERT_TRUE(imResponse.isMaster()) << imResponse.toBSON().toString();
ASSERT_FALSE(imResponse.isSecondary()) << imResponse.toBSON().toString();
@@ -218,7 +218,7 @@ TEST_F(ReplCoordElectTest, ElectionSucceedsWhenAllNodesVoteYea) {
<< BSON("_id" << 3 << "host"
<< "node3:12345")));
assertStartSuccess(configObj, HostAndPort("node1", 12345));
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
getReplCoord()->setMyLastAppliedOpTime(OpTime{{100, 1}, 0});
getExternalState()->setLastOpTime(OpTime{{100, 1}, 0});
@@ -246,7 +246,7 @@ TEST_F(ReplCoordElectTest, ElectionFailsWhenOneNodeVotesNay) {
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
OpTime time1(Timestamp(100, 1), 0);
getReplCoord()->setMyLastAppliedOpTime(time1);
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -293,7 +293,7 @@ TEST_F(ReplCoordElectTest, VotesWithStringValuesAreNotCountedAsYeas) {
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
OpTime time1(Timestamp(100, 1), 0);
getReplCoord()->setMyLastAppliedOpTime(time1);
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -341,7 +341,7 @@ TEST_F(ReplCoordElectTest, ElectionsAbortWhenNodeTransitionsToRollbackState) {
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
OpTime time1(Timestamp(100, 1), 0);
getReplCoord()->setMyLastAppliedOpTime(time1);
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -363,7 +363,7 @@ TEST_F(ReplCoordElectTest, ElectionsAbortWhenNodeTransitionsToRollbackState) {
TEST_F(ReplCoordElectTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
// start up, receive reconfig via heartbeat while at the same time, become candidate.
// candidate state should be cleared.
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
assertStartSuccess(BSON("_id"
<< "mySet"
<< "version"
@@ -419,7 +419,7 @@ TEST_F(ReplCoordElectTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
args.force = false;
args.newConfigObj = config.toBSON();
ASSERT_EQUALS(ErrorCodes::ConfigurationInProgress,
- getReplCoord()->processReplSetReconfig(&txn, args, &result));
+ getReplCoord()->processReplSetReconfig(&opCtx, args, &result));
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(2));
startCapturingLogMessages();
@@ -477,7 +477,7 @@ TEST_F(ReplCoordElectTest, StepsDownRemoteIfNodeHasHigherPriorityThanCurrentPrim
auto replCoord = getReplCoord();
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
OpTime time1(Timestamp(100, 1), 0);
getReplCoord()->setMyLastAppliedOpTime(time1);
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -583,8 +583,8 @@ TEST_F(ReplCoordElectTest, NodeCancelsElectionUponReceivingANewConfigDuringFresh
true};
BSONObjBuilder result;
- const auto txn = makeOperationContext();
- ASSERT_OK(getReplCoord()->processReplSetReconfig(txn.get(), config, &result));
+ const auto opCtx = makeOperationContext();
+ ASSERT_OK(getReplCoord()->processReplSetReconfig(opCtx.get(), config, &result));
// Wait until election cancels.
net->enterNetwork();
net->runReadyNetworkOperations();
@@ -629,8 +629,8 @@ TEST_F(ReplCoordElectTest, NodeCancelsElectionUponReceivingANewConfigDuringElect
true};
BSONObjBuilder result;
- const auto txn = makeOperationContext();
- ASSERT_OK(getReplCoord()->processReplSetReconfig(txn.get(), config, &result));
+ const auto opCtx = makeOperationContext();
+ ASSERT_OK(getReplCoord()->processReplSetReconfig(opCtx.get(), config, &result));
// Wait until election cancels.
getNet()->enterNetwork();
getNet()->runReadyNetworkOperations();
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
index 4484e3beead..e5d2dc214f7 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
@@ -210,9 +210,9 @@ void ReplicationCoordinatorImpl::_writeLastVoteForMyElection(
if (cbData.status == ErrorCodes::CallbackCanceled) {
return;
}
- invariant(cbData.txn);
+ invariant(cbData.opCtx);
- Status status = _externalState->storeLocalLastVoteDocument(cbData.txn, lastVote);
+ Status status = _externalState->storeLocalLastVoteDocument(cbData.opCtx, lastVote);
if (!status.isOK()) {
error() << "failed to store LastVote document when voting for myself: " << status;
return;
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
index e3660682e3c..be2370b7108 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
@@ -162,15 +162,15 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyElectableNode) {
simulateCatchUpTimeout();
ASSERT(getReplCoord()->getApplierState() == ApplierState::Draining);
- const auto txnPtr = makeOperationContext();
- auto& txn = *txnPtr;
+ const auto opCtxPtr = makeOperationContext();
+ auto& opCtx = *opCtxPtr;
// Since we're still in drain mode, expect that we report ismaster: false, issecondary:true.
IsMasterResponse imResponse;
getReplCoord()->fillIsMasterForReplSet(&imResponse);
ASSERT_FALSE(imResponse.isMaster()) << imResponse.toBSON().toString();
ASSERT_TRUE(imResponse.isSecondary()) << imResponse.toBSON().toString();
- getReplCoord()->signalDrainComplete(&txn, getReplCoord()->getTerm());
+ getReplCoord()->signalDrainComplete(&opCtx, getReplCoord()->getTerm());
getReplCoord()->fillIsMasterForReplSet(&imResponse);
ASSERT_TRUE(imResponse.isMaster()) << imResponse.toBSON().toString();
ASSERT_FALSE(imResponse.isSecondary()) << imResponse.toBSON().toString();
@@ -226,15 +226,15 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyNode) {
simulateCatchUpTimeout();
ASSERT(getReplCoord()->getApplierState() == ApplierState::Draining);
- const auto txnPtr = makeOperationContext();
- auto& txn = *txnPtr;
+ const auto opCtxPtr = makeOperationContext();
+ auto& opCtx = *opCtxPtr;
// Since we're still in drain mode, expect that we report ismaster: false, issecondary:true.
IsMasterResponse imResponse;
getReplCoord()->fillIsMasterForReplSet(&imResponse);
ASSERT_FALSE(imResponse.isMaster()) << imResponse.toBSON().toString();
ASSERT_TRUE(imResponse.isSecondary()) << imResponse.toBSON().toString();
- getReplCoord()->signalDrainComplete(&txn, getReplCoord()->getTerm());
+ getReplCoord()->signalDrainComplete(&opCtx, getReplCoord()->getTerm());
getReplCoord()->fillIsMasterForReplSet(&imResponse);
ASSERT_TRUE(imResponse.isMaster()) << imResponse.toBSON().toString();
ASSERT_FALSE(imResponse.isSecondary()) << imResponse.toBSON().toString();
@@ -255,7 +255,7 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenAllNodesVoteYea) {
<< "protocolVersion"
<< 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0));
getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(100, 1), 0));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -296,7 +296,7 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenMaxSevenNodesVoteYea) {
<< "protocolVersion"
<< 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0));
getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(100, 1), 0));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -332,7 +332,7 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringDryRun)
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
OpTime time1(Timestamp(100, 1), 0);
getReplCoord()->setMyLastAppliedOpTime(time1);
getReplCoord()->setMyLastDurableOpTime(time1);
@@ -391,7 +391,7 @@ TEST_F(ReplCoordTest, ElectionFailsWhenDryRunResponseContainsANewerTerm) {
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
OpTime time1(Timestamp(100, 1), 0);
getReplCoord()->setMyLastAppliedOpTime(time1);
getReplCoord()->setMyLastDurableOpTime(time1);
@@ -439,7 +439,7 @@ TEST_F(ReplCoordTest, ElectionFailsWhenDryRunResponseContainsANewerTerm) {
TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
// start up, receive reconfig via heartbeat while at the same time, become candidate.
// candidate state should be cleared.
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
assertStartSuccess(BSON("_id"
<< "mySet"
<< "version"
@@ -497,7 +497,7 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
args.force = false;
args.newConfigObj = config.toBSON();
ASSERT_EQUALS(ErrorCodes::ConfigurationInProgress,
- getReplCoord()->processReplSetReconfig(&txn, args, &result));
+ getReplCoord()->processReplSetReconfig(&opCtx, args, &result));
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(2));
startCapturingLogMessages();
@@ -568,7 +568,7 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringRequest
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
OpTime time1(Timestamp(100, 1), 0);
getReplCoord()->setMyLastAppliedOpTime(time1);
getReplCoord()->setMyLastDurableOpTime(time1);
@@ -619,7 +619,7 @@ TEST_F(ReplCoordTest, ElectionsAbortWhenNodeTransitionsToRollbackState) {
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
OpTime time1(Timestamp(100, 1), 0);
getReplCoord()->setMyLastAppliedOpTime(time1);
getReplCoord()->setMyLastDurableOpTime(time1);
@@ -657,7 +657,7 @@ TEST_F(ReplCoordTest, ElectionFailsWhenVoteRequestResponseContainsANewerTerm) {
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
OpTime time1(Timestamp(100, 1), 0);
getReplCoord()->setMyLastAppliedOpTime(time1);
getReplCoord()->setMyLastDurableOpTime(time1);
@@ -713,7 +713,7 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringDryRun) {
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
OpTime time1(Timestamp(100, 1), 0);
getReplCoord()->setMyLastAppliedOpTime(time1);
getReplCoord()->setMyLastDurableOpTime(time1);
@@ -752,7 +752,7 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringActualElection) {
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplSetConfig config = assertMakeRSConfig(configObj);
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
OpTime time1(Timestamp(100, 1), 0);
getReplCoord()->setMyLastAppliedOpTime(time1);
getReplCoord()->setMyLastDurableOpTime(time1);
@@ -761,7 +761,7 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringActualElection) {
simulateEnoughHeartbeatsForAllNodesUp();
simulateSuccessfulDryRun();
// update to a future term before the election completes
- getReplCoord()->updateTerm(&txn, 1000);
+ getReplCoord()->updateTerm(&opCtx, 1000);
NetworkInterfaceMock* net = getNet();
net->enterNetwork();
@@ -942,7 +942,7 @@ TEST_F(PriorityTakeoverTest, SchedulesPriorityTakeoverIfNodeHasHigherPriorityTha
auto replCoord = getReplCoord();
auto now = getNet()->now();
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
OpTime myOptime(Timestamp(100, 1), 0);
replCoord->setMyLastAppliedOpTime(myOptime);
replCoord->setMyLastDurableOpTime(myOptime);
@@ -963,7 +963,7 @@ TEST_F(PriorityTakeoverTest, SchedulesPriorityTakeoverIfNodeHasHigherPriorityTha
assertValidTakeoverDelay(config, now, priorityTakeoverTime, 0);
// Also make sure that updating the term cancels the scheduled priority takeover.
- ASSERT_EQUALS(ErrorCodes::StaleTerm, replCoord->updateTerm(&txn, replCoord->getTerm() + 1));
+ ASSERT_EQUALS(ErrorCodes::StaleTerm, replCoord->updateTerm(&opCtx, replCoord->getTerm() + 1));
ASSERT_FALSE(replCoord->getPriorityTakeover_forTest());
}
@@ -989,7 +989,7 @@ TEST_F(PriorityTakeoverTest, SuccessfulPriorityTakeover) {
auto replCoord = getReplCoord();
auto now = getNet()->now();
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
OpTime myOptime(Timestamp(100, 1), 0);
replCoord->setMyLastAppliedOpTime(myOptime);
replCoord->setMyLastDurableOpTime(myOptime);
@@ -1043,7 +1043,7 @@ TEST_F(PriorityTakeoverTest, DontCallForPriorityTakeoverWhenLaggedSameSecond) {
auto timeZero = getNet()->now();
auto now = getNet()->now();
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
OpTime currentOpTime(Timestamp(100, 5000), 0);
OpTime behindOpTime(Timestamp(100, 3999), 0);
OpTime closeEnoughOpTime(Timestamp(100, 4000), 0);
@@ -1119,7 +1119,7 @@ TEST_F(PriorityTakeoverTest, DontCallForPriorityTakeoverWhenLaggedDifferentSecon
auto timeZero = getNet()->now();
auto now = getNet()->now();
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
OpTime currentOpTime(Timestamp(100, 0), 0);
OpTime behindOpTime(Timestamp(97, 0), 0);
OpTime closeEnoughOpTime(Timestamp(98, 0), 0);
@@ -1218,8 +1218,8 @@ TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringDryRun) {
true};
BSONObjBuilder result;
- const auto txn = makeOperationContext();
- ASSERT_OK(getReplCoord()->processReplSetReconfig(txn.get(), config, &result));
+ const auto opCtx = makeOperationContext();
+ ASSERT_OK(getReplCoord()->processReplSetReconfig(opCtx.get(), config, &result));
// Wait until election cancels.
net->enterNetwork();
net->runReadyNetworkOperations();
@@ -1264,8 +1264,8 @@ TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringVotePhase)
true};
BSONObjBuilder result;
- const auto txn = makeOperationContext();
- ASSERT_OK(getReplCoord()->processReplSetReconfig(txn.get(), config, &result));
+ const auto opCtx = makeOperationContext();
+ ASSERT_OK(getReplCoord()->processReplSetReconfig(opCtx.get(), config, &result));
// Wait until election cancels.
getNet()->enterNetwork();
getNet()->runReadyNetworkOperations();
@@ -1429,10 +1429,10 @@ TEST_F(PrimaryCatchUpTest, PrimaryDoNotNeedToCatchUp) {
ASSERT(getReplCoord()->getApplierState() == ApplierState::Draining);
stopCapturingLogMessages();
ASSERT_EQUALS(1, countLogLinesContaining("My optime is most up-to-date, skipping catch-up"));
- auto txn = makeOperationContext();
- getReplCoord()->signalDrainComplete(txn.get(), getReplCoord()->getTerm());
- Lock::GlobalLock lock(txn->lockState(), MODE_IX, 1);
- ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(txn.get(), "test"));
+ auto opCtx = makeOperationContext();
+ getReplCoord()->signalDrainComplete(opCtx.get(), getReplCoord()->getTerm());
+ Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1);
+ ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
TEST_F(PrimaryCatchUpTest, PrimaryFreshnessScanTimeout) {
@@ -1453,10 +1453,10 @@ TEST_F(PrimaryCatchUpTest, PrimaryFreshnessScanTimeout) {
ASSERT(getReplCoord()->getApplierState() == ApplierState::Draining);
stopCapturingLogMessages();
ASSERT_EQUALS(1, countLogLinesContaining("Could not access any nodes within timeout"));
- auto txn = makeOperationContext();
- getReplCoord()->signalDrainComplete(txn.get(), getReplCoord()->getTerm());
- Lock::GlobalLock lock(txn->lockState(), MODE_IX, 1);
- ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(txn.get(), "test"));
+ auto opCtx = makeOperationContext();
+ getReplCoord()->signalDrainComplete(opCtx.get(), getReplCoord()->getTerm());
+ Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1);
+ ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
TEST_F(PrimaryCatchUpTest, PrimaryCatchUpSucceeds) {
@@ -1483,10 +1483,10 @@ TEST_F(PrimaryCatchUpTest, PrimaryCatchUpSucceeds) {
ASSERT(getReplCoord()->getApplierState() == ApplierState::Draining);
stopCapturingLogMessages();
ASSERT_EQUALS(1, countLogLinesContaining("Finished catch-up oplog after becoming primary."));
- auto txn = makeOperationContext();
- getReplCoord()->signalDrainComplete(txn.get(), getReplCoord()->getTerm());
- Lock::GlobalLock lock(txn->lockState(), MODE_IX, 1);
- ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(txn.get(), "test"));
+ auto opCtx = makeOperationContext();
+ getReplCoord()->signalDrainComplete(opCtx.get(), getReplCoord()->getTerm());
+ Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1);
+ ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
TEST_F(PrimaryCatchUpTest, PrimaryCatchUpTimeout) {
@@ -1507,10 +1507,10 @@ TEST_F(PrimaryCatchUpTest, PrimaryCatchUpTimeout) {
ASSERT(getReplCoord()->getApplierState() == ApplierState::Draining);
stopCapturingLogMessages();
ASSERT_EQUALS(1, countLogLinesContaining("Cannot catch up oplog after becoming primary"));
- auto txn = makeOperationContext();
- getReplCoord()->signalDrainComplete(txn.get(), getReplCoord()->getTerm());
- Lock::GlobalLock lock(txn->lockState(), MODE_IX, 1);
- ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(txn.get(), "test"));
+ auto opCtx = makeOperationContext();
+ getReplCoord()->signalDrainComplete(opCtx.get(), getReplCoord()->getTerm());
+ Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1);
+ ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
TEST_F(PrimaryCatchUpTest, PrimaryStepsDownDuringFreshnessScan) {
@@ -1536,9 +1536,9 @@ TEST_F(PrimaryCatchUpTest, PrimaryStepsDownDuringFreshnessScan) {
ASSERT(getReplCoord()->getApplierState() == ApplierState::Running);
stopCapturingLogMessages();
ASSERT_EQUALS(1, countLogLinesContaining("Stopped transition to primary"));
- auto txn = makeOperationContext();
- Lock::GlobalLock lock(txn->lockState(), MODE_IX, 1);
- ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(txn.get(), "test"));
+ auto opCtx = makeOperationContext();
+ Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1);
+ ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
TEST_F(PrimaryCatchUpTest, PrimaryStepsDownDuringCatchUp) {
@@ -1564,15 +1564,15 @@ TEST_F(PrimaryCatchUpTest, PrimaryStepsDownDuringCatchUp) {
net->enterNetwork();
net->runReadyNetworkOperations();
net->exitNetwork();
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
// Simulate the applier signaling replCoord to exit drain mode.
// At this point, we see the stepdown and reset the states.
- getReplCoord()->signalDrainComplete(txn.get(), getReplCoord()->getTerm());
+ getReplCoord()->signalDrainComplete(opCtx.get(), getReplCoord()->getTerm());
ASSERT(getReplCoord()->getApplierState() == ApplierState::Running);
stopCapturingLogMessages();
ASSERT_EQUALS(1, countLogLinesContaining("Cannot catch up oplog after becoming primary"));
- Lock::GlobalLock lock(txn->lockState(), MODE_IX, 1);
- ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(txn.get(), "test"));
+ Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1);
+ ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
TEST_F(PrimaryCatchUpTest, PrimaryStepsDownDuringDrainMode) {
@@ -1618,15 +1618,15 @@ TEST_F(PrimaryCatchUpTest, PrimaryStepsDownDuringDrainMode) {
getNet()->scheduleResponse(noi, getNet()->now(), makeFreshnessScanResponse(OpTime()));
});
ASSERT(replCoord->getApplierState() == ApplierState::Draining);
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
{
- Lock::GlobalLock lock(txn->lockState(), MODE_IX, 1);
- ASSERT_FALSE(replCoord->canAcceptWritesForDatabase(txn.get(), "test"));
+ Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1);
+ ASSERT_FALSE(replCoord->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
- replCoord->signalDrainComplete(txn.get(), replCoord->getTerm());
- Lock::GlobalLock lock(txn->lockState(), MODE_IX, 1);
+ replCoord->signalDrainComplete(opCtx.get(), replCoord->getTerm());
+ Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1);
ASSERT(replCoord->getApplierState() == ApplierState::Stopped);
- ASSERT_TRUE(replCoord->canAcceptWritesForDatabase(txn.get(), "test"));
+ ASSERT_TRUE(replCoord->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
} // namespace
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index 602f3cb5f40..db42e12f13d 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -385,7 +385,7 @@ void ReplicationCoordinatorImpl::_stepDownFinish(
LockGuard topoLock(_topoMutex);
- invariant(cbData.txn);
+ invariant(cbData.opCtx);
// TODO Add invariant that we've got global shared or global exclusive lock, when supported
// by lock manager.
stdx::unique_lock<stdx::mutex> lk(_mutex);
@@ -496,7 +496,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigStore(
"it is invalid: "
<< myIndex.getStatus();
} else {
- Status status = _externalState->storeLocalConfigDocument(cbd.txn, newConfig.toBSON());
+ Status status = _externalState->storeLocalConfigDocument(cbd.opCtx, newConfig.toBSON());
lk.lock();
if (!status.isOK()) {
@@ -518,7 +518,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigStore(
newConfig.getMemberAt(myIndex.getValue()).isArbiter();
if (!isArbiter && isFirstConfig) {
_externalState->startThreads(_settings);
- _startDataReplication(cbd.txn);
+ _startDataReplication(cbd.opCtx);
}
}
@@ -558,7 +558,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
invariant(!_rsConfig.isInitialized() ||
_rsConfig.getConfigVersion() < newConfig.getConfigVersion());
- if (_getMemberState_inlock().primary() && !cbData.txn) {
+ if (_getMemberState_inlock().primary() && !cbData.opCtx) {
// Not having an OperationContext in the CallbackData means we definitely aren't holding
// the global lock. Since we're primary and this reconfig could cause us to stepdown,
// reschedule this work with the global exclusive lock so the stepdown is safe.
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp
index 69c9b6541c6..2aee49fd3e4 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp
@@ -139,10 +139,10 @@ TEST_F(ReplCoordHBTest, NodeJoinsExistingReplSetWhenReceivingAConfigContainingTh
noi = net->getNextReadyRequest();
assertMemberState(MemberState::RS_STARTUP2);
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
ReplSetConfig storedConfig;
ASSERT_OK(storedConfig.initialize(
- unittest::assertGet(getExternalState()->loadLocalConfigDocument(&txn))));
+ unittest::assertGet(getExternalState()->loadLocalConfigDocument(&opCtx))));
ASSERT_OK(storedConfig.validate());
ASSERT_EQUALS(3, storedConfig.getConfigVersion());
ASSERT_EQUALS(3, storedConfig.getNumMembers());
@@ -205,9 +205,9 @@ TEST_F(ReplCoordHBTest,
noi = net->getNextReadyRequest();
assertMemberState(MemberState::RS_STARTUP, "2");
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
- StatusWith<BSONObj> loadedConfig(getExternalState()->loadLocalConfigDocument(&txn));
+ StatusWith<BSONObj> loadedConfig(getExternalState()->loadLocalConfigDocument(&opCtx));
ASSERT_NOT_OK(loadedConfig.getStatus()) << loadedConfig.getValue();
exitNetwork();
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
index ccc53d0bff9..5f4101e9eb2 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
@@ -138,10 +138,10 @@ TEST_F(ReplCoordHBV1Test,
noi = net->getNextReadyRequest();
assertMemberState(MemberState::RS_STARTUP2);
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
ReplSetConfig storedConfig;
ASSERT_OK(storedConfig.initialize(
- unittest::assertGet(getExternalState()->loadLocalConfigDocument(&txn))));
+ unittest::assertGet(getExternalState()->loadLocalConfigDocument(&opCtx))));
ASSERT_OK(storedConfig.validate());
ASSERT_EQUALS(3, storedConfig.getConfigVersion());
ASSERT_EQUALS(3, storedConfig.getNumMembers());
@@ -207,10 +207,10 @@ TEST_F(ReplCoordHBV1Test,
noi = net->getNextReadyRequest();
assertMemberState(MemberState::RS_ARBITER);
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
ReplSetConfig storedConfig;
ASSERT_OK(storedConfig.initialize(
- unittest::assertGet(getExternalState()->loadLocalConfigDocument(&txn))));
+ unittest::assertGet(getExternalState()->loadLocalConfigDocument(&opCtx))));
ASSERT_OK(storedConfig.validate());
ASSERT_EQUALS(3, storedConfig.getConfigVersion());
ASSERT_EQUALS(3, storedConfig.getNumMembers());
@@ -276,9 +276,9 @@ TEST_F(ReplCoordHBV1Test,
noi = net->getNextReadyRequest();
assertMemberState(MemberState::RS_STARTUP, "2");
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
- StatusWith<BSONObj> loadedConfig(getExternalState()->loadLocalConfigDocument(&txn));
+ StatusWith<BSONObj> loadedConfig(getExternalState()->loadLocalConfigDocument(&opCtx));
ASSERT_NOT_OK(loadedConfig.getStatus()) << loadedConfig.getValue();
exitNetwork();
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
index 69c5f2c6fb9..926b4a47a0f 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
@@ -60,9 +60,9 @@ TEST_F(ReplCoordTest, NodeReturnsNotYetInitializedWhenReconfigReceivedPriorToIni
BSONObjBuilder result;
ReplSetReconfigArgs args;
- const auto txn = makeOperationContext();
+ const auto opCtx = makeOperationContext();
ASSERT_EQUALS(ErrorCodes::NotYetInitialized,
- getReplCoord()->processReplSetReconfig(txn.get(), args, &result));
+ getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result));
ASSERT_TRUE(result.obj().isEmpty());
}
@@ -87,9 +87,9 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenReconfigReceivedWhileSecondary) {
BSONObjBuilder result;
ReplSetReconfigArgs args;
args.force = false;
- const auto txn = makeOperationContext();
+ const auto opCtx = makeOperationContext();
ASSERT_EQUALS(ErrorCodes::NotMaster,
- getReplCoord()->processReplSetReconfig(txn.get(), args, &result));
+ getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result));
ASSERT_TRUE(result.obj().isEmpty());
}
@@ -128,10 +128,10 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
<< "node2:12345"
<< "arbiterOnly"
<< true)));
- const auto txn = makeOperationContext();
+ const auto opCtx = makeOperationContext();
// ErrorCodes::BadValue should be propagated from ReplSetConfig::initialize()
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
- getReplCoord()->processReplSetReconfig(txn.get(), args, &result));
+ getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result));
ASSERT_TRUE(result.obj().isEmpty());
}
@@ -165,9 +165,9 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
<< BSON("_id" << 2 << "host"
<< "node2:12345")));
- const auto txn = makeOperationContext();
+ const auto opCtx = makeOperationContext();
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
- getReplCoord()->processReplSetReconfig(txn.get(), args, &result));
+ getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result));
ASSERT_TRUE(result.obj().isEmpty());
}
@@ -205,9 +205,9 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
<< "settings"
<< BSON("replicaSetId" << OID::gen()));
- const auto txn = makeOperationContext();
+ const auto opCtx = makeOperationContext();
ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- getReplCoord()->processReplSetReconfig(txn.get(), args, &result));
+ getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result));
ASSERT_TRUE(result.obj().isEmpty());
}
@@ -242,18 +242,18 @@ TEST_F(ReplCoordTest,
<< BSON("_id" << 2 << "host"
<< "node2:12345")));
- const auto txn = makeOperationContext();
+ const auto opCtx = makeOperationContext();
ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- getReplCoord()->processReplSetReconfig(txn.get(), args, &result));
+ getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result));
ASSERT_TRUE(result.obj().isEmpty());
}
void doReplSetInitiate(ReplicationCoordinatorImpl* replCoord,
Status* status,
- OperationContext* txn) {
+ OperationContext* opCtx) {
BSONObjBuilder garbage;
*status =
- replCoord->processReplSetInitiate(txn,
+ replCoord->processReplSetInitiate(opCtx,
BSON("_id"
<< "mySet"
<< "version"
@@ -268,7 +268,7 @@ void doReplSetInitiate(ReplicationCoordinatorImpl* replCoord,
void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord,
Status* status,
- OperationContext* txn) {
+ OperationContext* opCtx) {
BSONObjBuilder garbage;
ReplSetReconfigArgs args;
args.force = false;
@@ -284,7 +284,7 @@ void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord,
<< "node2:12345"
<< "priority"
<< 3)));
- *status = replCoord->processReplSetReconfig(txn, args, &garbage);
+ *status = replCoord->processReplSetReconfig(opCtx, args, &garbage);
}
TEST_F(ReplCoordTest,
@@ -307,8 +307,9 @@ TEST_F(ReplCoordTest,
simulateSuccessfulV1Election();
Status status(ErrorCodes::InternalError, "Not Set");
- const auto txn = makeOperationContext();
- stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status, txn.get()));
+ const auto opCtx = makeOperationContext();
+ stdx::thread reconfigThread(
+ stdx::bind(doReplSetReconfig, getReplCoord(), &status, opCtx.get()));
NetworkInterfaceMock* net = getNet();
getNet()->enterNetwork();
@@ -350,8 +351,9 @@ TEST_F(ReplCoordTest, NodeReturnsOutOfDiskSpaceWhenSavingANewConfigFailsDuringRe
Status status(ErrorCodes::InternalError, "Not Set");
getExternalState()->setStoreLocalConfigDocumentStatus(
Status(ErrorCodes::OutOfDiskSpace, "The test set this"));
- const auto txn = makeOperationContext();
- stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status, txn.get()));
+ const auto opCtx = makeOperationContext();
+ stdx::thread reconfigThread(
+ stdx::bind(doReplSetReconfig, getReplCoord(), &status, opCtx.get()));
replyToReceivedHeartbeat();
reconfigThread.join();
@@ -377,9 +379,10 @@ TEST_F(ReplCoordTest,
simulateSuccessfulV1Election();
Status status(ErrorCodes::InternalError, "Not Set");
- const auto txn = makeOperationContext();
+ const auto opCtx = makeOperationContext();
// first reconfig
- stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status, txn.get()));
+ stdx::thread reconfigThread(
+ stdx::bind(doReplSetReconfig, getReplCoord(), &status, opCtx.get()));
getNet()->enterNetwork();
getNet()->blackHole(getNet()->getNextReadyRequest());
getNet()->exitNetwork();
@@ -398,10 +401,10 @@ TEST_F(ReplCoordTest,
<< BSON("_id" << 2 << "host"
<< "node2:12345")));
ASSERT_EQUALS(ErrorCodes::ConfigurationInProgress,
- getReplCoord()->processReplSetReconfig(txn.get(), args, &result));
+ getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result));
ASSERT_TRUE(result.obj().isEmpty());
- shutdown(txn.get());
+ shutdown(opCtx.get());
reconfigThread.join();
}
@@ -415,8 +418,8 @@ TEST_F(ReplCoordTest, NodeReturnsConfigurationInProgressWhenReceivingAReconfigWh
// initiate
Status status(ErrorCodes::InternalError, "Not Set");
- const auto txn = makeOperationContext();
- stdx::thread initateThread(stdx::bind(doReplSetInitiate, getReplCoord(), &status, txn.get()));
+ const auto opCtx = makeOperationContext();
+ stdx::thread initateThread(stdx::bind(doReplSetInitiate, getReplCoord(), &status, opCtx.get()));
getNet()->enterNetwork();
getNet()->blackHole(getNet()->getNextReadyRequest());
getNet()->exitNetwork();
@@ -435,10 +438,10 @@ TEST_F(ReplCoordTest, NodeReturnsConfigurationInProgressWhenReceivingAReconfigWh
<< BSON("_id" << 2 << "host"
<< "node2:12345")));
ASSERT_EQUALS(ErrorCodes::ConfigurationInProgress,
- getReplCoord()->processReplSetReconfig(txn.get(), args, &result));
+ getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result));
ASSERT_TRUE(result.obj().isEmpty());
- shutdown(txn.get());
+ shutdown(opCtx.get());
initateThread.join();
}
@@ -462,8 +465,9 @@ TEST_F(ReplCoordTest, PrimaryNodeAcceptsNewConfigWhenReceivingAReconfigWithAComp
simulateSuccessfulV1Election();
Status status(ErrorCodes::InternalError, "Not Set");
- const auto txn = makeOperationContext();
- stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status, txn.get()));
+ const auto opCtx = makeOperationContext();
+ stdx::thread reconfigThread(
+ stdx::bind(doReplSetReconfig, getReplCoord(), &status, opCtx.get()));
NetworkInterfaceMock* net = getNet();
getNet()->enterNetwork();
@@ -541,9 +545,9 @@ TEST_F(
ReplSetReconfigArgs args;
args.force = false;
args.newConfigObj = config.toBSON();
- const auto txn = makeOperationContext();
+ const auto opCtx = makeOperationContext();
ASSERT_EQUALS(ErrorCodes::ConfigurationInProgress,
- getReplCoord()->processReplSetReconfig(txn.get(), args, &result));
+ getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result));
getExternalState()->setStoreLocalConfigDocumentToHang(false);
}
@@ -568,8 +572,9 @@ TEST_F(ReplCoordTest, NodeDoesNotAcceptHeartbeatReconfigWhileInTheMidstOfReconfi
// start reconfigThread
Status status(ErrorCodes::InternalError, "Not Set");
- const auto txn = makeOperationContext();
- stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status, txn.get()));
+ const auto opCtx = makeOperationContext();
+ stdx::thread reconfigThread(
+ stdx::bind(doReplSetReconfig, getReplCoord(), &status, opCtx.get()));
// wait for reconfigThread to create network requests to ensure the replication coordinator
// is in state kConfigReconfiguring
@@ -609,7 +614,7 @@ TEST_F(ReplCoordTest, NodeDoesNotAcceptHeartbeatReconfigWhileInTheMidstOfReconfi
stopCapturingLogMessages();
ASSERT_EQUALS(
1, countLogLinesContaining("because already in the midst of a configuration process"));
- shutdown(txn.get());
+ shutdown(opCtx.get());
reconfigThread.join();
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Log());
}
@@ -644,13 +649,13 @@ TEST_F(ReplCoordTest, NodeAcceptsConfigFromAReconfigWithForceTrueWhileNotPrimary
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")));
- const auto txn = makeOperationContext();
+ const auto opCtx = makeOperationContext();
ASSERT_EQUALS(ErrorCodes::NotMaster,
- getReplCoord()->processReplSetReconfig(txn.get(), args, &result));
+ getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result));
// forced should succeed
args.force = true;
- ASSERT_OK(getReplCoord()->processReplSetReconfig(txn.get(), args, &result));
+ ASSERT_OK(getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result));
getReplCoord()->processReplSetGetConfig(&result);
// ensure forced reconfig results in a random larger version
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index 3cb0d2d9e3b..ae8d5b5fe92 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -100,7 +100,7 @@ struct OpTimeWithTermOne {
Timestamp timestamp;
};
-void runSingleNodeElection(ServiceContext::UniqueOperationContext txn,
+void runSingleNodeElection(ServiceContext::UniqueOperationContext opCtx,
ReplicationCoordinatorImpl* replCoord,
executor::NetworkInterfaceMock* net) {
replCoord->setMyLastAppliedOpTime(OpTime(Timestamp(1, 0), 0));
@@ -116,15 +116,15 @@ void runSingleNodeElection(ServiceContext::UniqueOperationContext txn,
ASSERT(replCoord->getApplierState() == ReplicationCoordinator::ApplierState::Draining);
ASSERT(replCoord->getMemberState().primary()) << replCoord->getMemberState().toString();
- replCoord->signalDrainComplete(txn.get(), replCoord->getTerm());
+ replCoord->signalDrainComplete(opCtx.get(), replCoord->getTerm());
}
/**
* Helper that kills an operation, taking the necessary locks.
*/
-void killOperation(OperationContext* txn) {
- stdx::lock_guard<Client> lkClient(*txn->getClient());
- txn->getServiceContext()->killOperation(txn);
+void killOperation(OperationContext* opCtx) {
+ stdx::lock_guard<Client> lkClient(*opCtx->getClient());
+ opCtx->getServiceContext()->killOperation(opCtx);
}
TEST_F(ReplCoordTest, NodeEntersStartup2StateWhenStartingUpWithValidLocalConfig) {
@@ -202,10 +202,10 @@ TEST_F(ReplCoordTest, NodeEntersStartupStateWhenStartingUpWithNoLocalConfig) {
TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenInitiatedWithAnEmptyConfig) {
init("mySet");
start(HostAndPort("node1", 12345));
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
BSONObjBuilder result;
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
- getReplCoord()->processReplSetInitiate(txn.get(), BSONObj(), &result));
+ getReplCoord()->processReplSetInitiate(opCtx.get(), BSONObj(), &result));
ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
}
@@ -215,12 +215,12 @@ TEST_F(ReplCoordTest,
start(HostAndPort("node1", 12345));
ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
// Starting uninitialized, show that we can perform the initiate behavior.
BSONObjBuilder result1;
ASSERT_OK(
- getReplCoord()->processReplSetInitiate(txn.get(),
+ getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
<< "version"
@@ -236,7 +236,7 @@ TEST_F(ReplCoordTest,
BSONObjBuilder result2;
ASSERT_EQUALS(
ErrorCodes::AlreadyInitialized,
- getReplCoord()->processReplSetInitiate(txn.get(),
+ getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
<< "version"
@@ -254,14 +254,14 @@ TEST_F(ReplCoordTest,
NodeReturnsInvalidReplicaSetConfigWhenInitiatingViaANodeThatCannotBecomePrimary) {
init("mySet");
start(HostAndPort("node1", 12345));
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
// Starting uninitialized, show that we can perform the initiate behavior.
BSONObjBuilder result1;
auto status =
- getReplCoord()->processReplSetInitiate(txn.get(),
+ getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
<< "version"
@@ -283,16 +283,16 @@ TEST_F(ReplCoordTest,
InitiateShouldSucceedWithAValidConfigEvenIfItHasFailedWithAnInvalidConfigPreviously) {
init("mySet");
start(HostAndPort("node1", 12345));
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
BSONObjBuilder result;
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
- getReplCoord()->processReplSetInitiate(txn.get(), BSONObj(), &result));
+ getReplCoord()->processReplSetInitiate(opCtx.get(), BSONObj(), &result));
ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
// Having failed to initiate once, show that we can now initiate.
BSONObjBuilder result1;
ASSERT_OK(
- getReplCoord()->processReplSetInitiate(txn.get(),
+ getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
<< "version"
@@ -309,10 +309,10 @@ TEST_F(ReplCoordTest,
BSONObjBuilder result;
init("mySet");
start(HostAndPort("node1", 12345));
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
ASSERT_EQUALS(
ErrorCodes::InvalidReplicaSetConfig,
- getReplCoord()->processReplSetInitiate(txn.get(),
+ getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
<< "version"
@@ -326,9 +326,9 @@ TEST_F(ReplCoordTest,
void doReplSetInitiate(ReplicationCoordinatorImpl* replCoord, Status* status) {
BSONObjBuilder garbage;
auto client = getGlobalServiceContext()->makeClient("rsi");
- auto txn = client->makeOperationContext();
+ auto opCtx = client->makeOperationContext();
*status =
- replCoord->processReplSetInitiate(txn.get(),
+ replCoord->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
<< "version"
@@ -411,13 +411,13 @@ TEST_F(ReplCoordTest,
NodeReturnsInvalidReplicaSetConfigWhenInitiatingWithAConfigWithAMismatchedSetName) {
init("mySet");
start(HostAndPort("node1", 12345));
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
BSONObjBuilder result1;
ASSERT_EQUALS(
ErrorCodes::InvalidReplicaSetConfig,
- getReplCoord()->processReplSetInitiate(txn.get(),
+ getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "wrongSet"
<< "version"
@@ -432,11 +432,11 @@ TEST_F(ReplCoordTest,
TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenInitiatingWithAnEmptyConfig) {
init("mySet");
start(HostAndPort("node1", 12345));
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
BSONObjBuilder result1;
- auto status = getReplCoord()->processReplSetInitiate(txn.get(), BSONObj(), &result1);
+ auto status = getReplCoord()->processReplSetInitiate(opCtx.get(), BSONObj(), &result1);
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig, status);
ASSERT_STRING_CONTAINS(status.reason(), "Missing expected field \"_id\"");
ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
@@ -445,12 +445,12 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenInitiatingWithAnEmpt
TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenInitiatingWithoutAn_idField) {
init("mySet");
start(HostAndPort("node1", 12345));
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
BSONObjBuilder result1;
auto status = getReplCoord()->processReplSetInitiate(
- txn.get(),
+ opCtx.get(),
BSON("version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1);
@@ -463,12 +463,12 @@ TEST_F(ReplCoordTest,
NodeReturnsInvalidReplicaSetConfigWhenInitiatingWithAConfigVersionNotEqualToOne) {
init("mySet");
start(HostAndPort("node1", 12345));
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
BSONObjBuilder result1;
auto status =
- getReplCoord()->processReplSetInitiate(txn.get(),
+ getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
<< "version"
@@ -485,13 +485,13 @@ TEST_F(ReplCoordTest,
TEST_F(ReplCoordTest, InitiateFailsWithoutReplSetFlag) {
init("");
start(HostAndPort("node1", 12345));
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
BSONObjBuilder result1;
ASSERT_EQUALS(
ErrorCodes::NoReplicationEnabled,
- getReplCoord()->processReplSetInitiate(txn.get(),
+ getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
<< "version"
@@ -506,7 +506,7 @@ TEST_F(ReplCoordTest, InitiateFailsWithoutReplSetFlag) {
TEST_F(ReplCoordTest, NodeReturnsOutOfDiskSpaceWhenInitiateCannotWriteConfigToDisk) {
init("mySet");
start(HostAndPort("node1", 12345));
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
BSONObjBuilder result1;
@@ -514,7 +514,7 @@ TEST_F(ReplCoordTest, NodeReturnsOutOfDiskSpaceWhenInitiateCannotWriteConfigToDi
Status(ErrorCodes::OutOfDiskSpace, "The test set this"));
ASSERT_EQUALS(
ErrorCodes::OutOfDiskSpace,
- getReplCoord()->processReplSetInitiate(txn.get(),
+ getReplCoord()->processReplSetInitiate(opCtx.get(),
BSON("_id"
<< "mySet"
<< "version"
@@ -602,7 +602,7 @@ TEST_F(ReplCoordTest, RollBackIDShouldIncreaseByOneWhenIncrementRollbackIDIsCall
TEST_F(ReplCoordTest, NodeReturnsImmediatelyWhenAwaitReplicationIsRanAgainstAStandaloneNode) {
init("");
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
OpTimeWithTermOne time(100, 1);
@@ -613,7 +613,7 @@ TEST_F(ReplCoordTest, NodeReturnsImmediatelyWhenAwaitReplicationIsRanAgainstASta
// Because we didn't set ReplSettings.replSet, it will think we're a standalone so
// awaitReplication will always work.
ReplicationCoordinator::StatusAndDuration statusAndDur =
- getReplCoord()->awaitReplication(txn.get(), time, writeConcern);
+ getReplCoord()->awaitReplication(opCtx.get(), time, writeConcern);
ASSERT_OK(statusAndDur.status);
}
@@ -621,7 +621,7 @@ TEST_F(ReplCoordTest, NodeReturnsImmediatelyWhenAwaitReplicationIsRanAgainstAMas
ReplSettings settings;
settings.setMaster(true);
init(settings);
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
OpTimeWithTermOne time(100, 1);
@@ -631,7 +631,7 @@ TEST_F(ReplCoordTest, NodeReturnsImmediatelyWhenAwaitReplicationIsRanAgainstAMas
writeConcern.wMode = WriteConcernOptions::kMajority;
// w:majority always works on master/slave
ReplicationCoordinator::StatusAndDuration statusAndDur =
- getReplCoord()->awaitReplication(txn.get(), time, writeConcern);
+ getReplCoord()->awaitReplication(opCtx.get(), time, writeConcern);
ASSERT_OK(statusAndDur.status);
}
@@ -655,7 +655,7 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenRunningAwaitReplicationAgainstASec
<< 2))),
HostAndPort("node1", 12345));
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
OpTimeWithTermOne time(100, 1);
@@ -666,7 +666,7 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenRunningAwaitReplicationAgainstASec
// Node should fail to awaitReplication when not primary.
ReplicationCoordinator::StatusAndDuration statusAndDur =
- getReplCoord()->awaitReplication(txn.get(), time, writeConcern);
+ getReplCoord()->awaitReplication(opCtx.get(), time, writeConcern);
ASSERT_EQUALS(ErrorCodes::PrimarySteppedDown, statusAndDur.status);
}
@@ -704,10 +704,10 @@ TEST_F(ReplCoordTest, NodeReturnsOkWhenRunningAwaitReplicationAgainstPrimaryWith
simulateSuccessfulV1Election();
ASSERT(getReplCoord()->getMemberState().primary());
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
ReplicationCoordinator::StatusAndDuration statusAndDur =
- getReplCoord()->awaitReplication(txn.get(), time, writeConcern);
+ getReplCoord()->awaitReplication(opCtx.get(), time, writeConcern);
ASSERT_OK(statusAndDur.status);
}
@@ -748,47 +748,47 @@ TEST_F(ReplCoordTest,
writeConcern.wNumNodes = 1;
writeConcern.syncMode = WriteConcernOptions::SyncMode::JOURNAL;
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
// 1 node waiting for time 1
ReplicationCoordinator::StatusAndDuration statusAndDur =
- getReplCoord()->awaitReplication(txn.get(), time1, writeConcern);
+ getReplCoord()->awaitReplication(opCtx.get(), time1, writeConcern);
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
getReplCoord()->setMyLastAppliedOpTime(time1);
getReplCoord()->setMyLastDurableOpTime(time1);
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, writeConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, writeConcern);
ASSERT_OK(statusAndDur.status);
// 2 nodes waiting for time1
writeConcern.wNumNodes = 2;
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, writeConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, writeConcern);
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
// Applied is not durable and will not satisfy WriteConcern with SyncMode JOURNAL.
ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 1, time1));
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, writeConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, writeConcern);
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
ASSERT_OK(getReplCoord()->setLastDurableOptime_forTest(2, 1, time1));
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, writeConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, writeConcern);
ASSERT_OK(statusAndDur.status);
// 2 nodes waiting for time2
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time2, writeConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern);
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
getReplCoord()->setMyLastAppliedOpTime(time2);
getReplCoord()->setMyLastDurableOpTime(time2);
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time2, writeConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern);
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 2, time2));
ASSERT_OK(getReplCoord()->setLastDurableOptime_forTest(2, 2, time2));
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time2, writeConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern);
ASSERT_OK(statusAndDur.status);
// 3 nodes waiting for time2
writeConcern.wNumNodes = 3;
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time2, writeConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern);
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 3, time2));
ASSERT_OK(getReplCoord()->setLastDurableOptime_forTest(2, 3, time2));
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time2, writeConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern);
ASSERT_OK(statusAndDur.status);
}
@@ -827,44 +827,44 @@ TEST_F(ReplCoordTest, NodeReturnsWriteConcernFailedUntilASufficientNumberOfNodes
writeConcern.wTimeout = WriteConcernOptions::kNoWaiting;
writeConcern.wNumNodes = 1;
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
// 1 node waiting for time 1
ReplicationCoordinator::StatusAndDuration statusAndDur =
- getReplCoord()->awaitReplication(txn.get(), time1, writeConcern);
+ getReplCoord()->awaitReplication(opCtx.get(), time1, writeConcern);
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
getReplCoord()->setMyLastAppliedOpTime(time1);
getReplCoord()->setMyLastDurableOpTime(time1);
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, writeConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, writeConcern);
ASSERT_OK(statusAndDur.status);
// 2 nodes waiting for time1
writeConcern.wNumNodes = 2;
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, writeConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, writeConcern);
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 1, time1));
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, writeConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, writeConcern);
ASSERT_OK(statusAndDur.status);
// 2 nodes waiting for time2
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time2, writeConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern);
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
getReplCoord()->setMyLastAppliedOpTime(time2);
getReplCoord()->setMyLastDurableOpTime(time2);
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time2, writeConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern);
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 2, time2));
ASSERT_OK(getReplCoord()->setLastDurableOptime_forTest(2, 2, time2));
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time2, writeConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern);
ASSERT_OK(statusAndDur.status);
// 3 nodes waiting for time2
writeConcern.wNumNodes = 3;
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time2, writeConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern);
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 3, time2));
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time2, writeConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern);
ASSERT_OK(statusAndDur.status);
}
@@ -872,7 +872,7 @@ TEST_F(ReplCoordTest,
NodeReturnsUnknownReplWriteConcernWhenAwaitReplicationReceivesAnInvalidWriteConcernMode) {
auto service = stdx::make_unique<ServiceContextNoop>();
auto client = service->makeClient("test");
- auto txn = client->makeOperationContext();
+ auto opCtx = client->makeOperationContext();
assertStartSuccess(BSON("_id"
<< "mySet"
@@ -904,7 +904,7 @@ TEST_F(ReplCoordTest,
invalidWriteConcern.wMode = "fakemode";
ReplicationCoordinator::StatusAndDuration statusAndDur =
- getReplCoord()->awaitReplication(txn.get(), time1, invalidWriteConcern);
+ getReplCoord()->awaitReplication(opCtx.get(), time1, invalidWriteConcern);
ASSERT_EQUALS(ErrorCodes::UnknownReplWriteConcern, statusAndDur.status);
}
@@ -913,7 +913,7 @@ TEST_F(
NodeReturnsWriteConcernFailedUntilASufficientSetOfNodesHaveTheWriteAndTheWriteIsInACommittedSnapshot) {
auto service = stdx::make_unique<ServiceContextNoop>();
auto client = service->makeClient("test");
- auto txn = client->makeOperationContext();
+ auto opCtx = client->makeOperationContext();
assertStartSuccess(
BSON("_id"
@@ -988,11 +988,11 @@ TEST_F(
getReplCoord()->setMyLastAppliedOpTime(time1);
getReplCoord()->setMyLastDurableOpTime(time1);
ReplicationCoordinator::StatusAndDuration statusAndDur =
- getReplCoord()->awaitReplication(txn.get(), time1, majorityWriteConcern);
+ getReplCoord()->awaitReplication(opCtx.get(), time1, majorityWriteConcern);
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, multiDCWriteConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, multiDCWriteConcern);
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, multiRackWriteConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, multiRackWriteConcern);
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
// Majority satisfied but not either custom mode
@@ -1000,56 +1000,57 @@ TEST_F(
getReplCoord()->setLastDurableOptime_forTest(2, 1, time1);
getReplCoord()->setLastAppliedOptime_forTest(2, 2, time1);
getReplCoord()->setLastDurableOptime_forTest(2, 2, time1);
- getReplCoord()->createSnapshot(txn.get(), time1, SnapshotName(1));
+ getReplCoord()->createSnapshot(opCtx.get(), time1, SnapshotName(1));
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, majorityWriteConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, majorityWriteConcern);
ASSERT_OK(statusAndDur.status);
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, multiDCWriteConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, multiDCWriteConcern);
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, multiRackWriteConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, multiRackWriteConcern);
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
// All modes satisfied
getReplCoord()->setLastAppliedOptime_forTest(2, 3, time1);
getReplCoord()->setLastDurableOptime_forTest(2, 3, time1);
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, majorityWriteConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, majorityWriteConcern);
ASSERT_OK(statusAndDur.status);
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, multiDCWriteConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, multiDCWriteConcern);
ASSERT_OK(statusAndDur.status);
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time1, multiRackWriteConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, multiRackWriteConcern);
ASSERT_OK(statusAndDur.status);
// Majority also waits for the committed snapshot to be newer than all snapshots reserved by
// this operation. Custom modes not affected by this.
- while (getReplCoord()->reserveSnapshotName(txn.get()) <= SnapshotName(1)) {
+ while (getReplCoord()->reserveSnapshotName(opCtx.get()) <= SnapshotName(1)) {
// These unittests "cheat" and use SnapshotName(1) without advancing the counter. Reserve
// another name if we didn't get a high enough one.
}
auto zeroOpTimeInCurrentTerm = OpTime(Timestamp(0, 0), 1);
- ReplClientInfo::forClient(txn.get()->getClient()).setLastOp(zeroOpTimeInCurrentTerm);
+ ReplClientInfo::forClient(opCtx.get()->getClient()).setLastOp(zeroOpTimeInCurrentTerm);
statusAndDur =
- getReplCoord()->awaitReplicationOfLastOpForClient(txn.get(), majorityWriteConcern);
+ getReplCoord()->awaitReplicationOfLastOpForClient(opCtx.get(), majorityWriteConcern);
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
statusAndDur =
- getReplCoord()->awaitReplicationOfLastOpForClient(txn.get(), multiDCWriteConcern);
+ getReplCoord()->awaitReplicationOfLastOpForClient(opCtx.get(), multiDCWriteConcern);
ASSERT_OK(statusAndDur.status);
statusAndDur =
- getReplCoord()->awaitReplicationOfLastOpForClient(txn.get(), multiRackWriteConcern);
+ getReplCoord()->awaitReplicationOfLastOpForClient(opCtx.get(), multiRackWriteConcern);
ASSERT_OK(statusAndDur.status);
// All modes satisfied
- getReplCoord()->createSnapshot(txn.get(), time1, getReplCoord()->reserveSnapshotName(nullptr));
+ getReplCoord()->createSnapshot(
+ opCtx.get(), time1, getReplCoord()->reserveSnapshotName(nullptr));
statusAndDur =
- getReplCoord()->awaitReplicationOfLastOpForClient(txn.get(), majorityWriteConcern);
+ getReplCoord()->awaitReplicationOfLastOpForClient(opCtx.get(), majorityWriteConcern);
ASSERT_OK(statusAndDur.status);
statusAndDur =
- getReplCoord()->awaitReplicationOfLastOpForClient(txn.get(), multiDCWriteConcern);
+ getReplCoord()->awaitReplicationOfLastOpForClient(opCtx.get(), multiDCWriteConcern);
ASSERT_OK(statusAndDur.status);
statusAndDur =
- getReplCoord()->awaitReplicationOfLastOpForClient(txn.get(), multiRackWriteConcern);
+ getReplCoord()->awaitReplicationOfLastOpForClient(opCtx.get(), multiRackWriteConcern);
ASSERT_OK(statusAndDur.status);
// multiDC satisfied but not majority or multiRack
@@ -1058,11 +1059,11 @@ TEST_F(
getReplCoord()->setLastAppliedOptime_forTest(2, 3, time2);
getReplCoord()->setLastDurableOptime_forTest(2, 3, time2);
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time2, majorityWriteConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, majorityWriteConcern);
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time2, multiDCWriteConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, multiDCWriteConcern);
ASSERT_OK(statusAndDur.status);
- statusAndDur = getReplCoord()->awaitReplication(txn.get(), time2, multiRackWriteConcern);
+ statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, multiRackWriteConcern);
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
}
@@ -1080,12 +1081,12 @@ public:
: _replCoord(replCoord),
_service(service),
_client(service->makeClient("replAwaiter")),
- _txn(_client->makeOperationContext()),
+ _opCtx(_client->makeOperationContext()),
_finished(false),
_result(ReplicationCoordinator::StatusAndDuration(Status::OK(), Milliseconds(0))) {}
OperationContext* getOperationContext() {
- return _txn.get();
+ return _opCtx.get();
}
void setOpTime(const OpTime& ot) {
@@ -1116,14 +1117,14 @@ public:
private:
void _awaitReplication() {
- _result = _replCoord->awaitReplication(_txn.get(), _optime, _writeConcern);
+ _result = _replCoord->awaitReplication(_opCtx.get(), _optime, _writeConcern);
_finished = true;
}
ReplicationCoordinatorImpl* _replCoord;
ServiceContext* _service;
ServiceContext::UniqueClient _client;
- ServiceContext::UniqueOperationContext _txn;
+ ServiceContext::UniqueOperationContext _opCtx;
bool _finished;
OpTime _optime;
WriteConcernOptions _writeConcern;
@@ -1286,8 +1287,8 @@ TEST_F(ReplCoordTest,
ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 1, time1));
ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 2, time1));
{
- auto txn = makeOperationContext();
- shutdown(txn.get());
+ auto opCtx = makeOperationContext();
+ shutdown(opCtx.get());
}
ReplicationCoordinator::StatusAndDuration statusAndDur = awaiter.getResult();
ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, statusAndDur.status);
@@ -1320,7 +1321,7 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenSteppingDownBeforeSatisfyingAWrite
getReplCoord()->setMyLastDurableOpTime(OpTimeWithTermOne(100, 0));
simulateSuccessfulV1Election();
- const auto txn = makeOperationContext();
+ const auto opCtx = makeOperationContext();
ReplicationAwaiter awaiter(getReplCoord(), getServiceContext());
OpTimeWithTermOne time1(100, 1);
@@ -1336,7 +1337,7 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenSteppingDownBeforeSatisfyingAWrite
awaiter.start();
ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 1, time1));
ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 2, time1));
- ASSERT_OK(getReplCoord()->stepDown(txn.get(), true, Milliseconds(0), Milliseconds(1000)));
+ ASSERT_OK(getReplCoord()->stepDown(opCtx.get(), true, Milliseconds(0), Milliseconds(1000)));
ReplicationCoordinator::StatusAndDuration statusAndDur = awaiter.getResult();
ASSERT_EQUALS(ErrorCodes::PrimarySteppedDown, statusAndDur.status);
awaiter.reset();
@@ -1391,11 +1392,11 @@ protected:
static SharedClientAndOperation make(ServiceContext* serviceContext) {
SharedClientAndOperation result;
result.client = serviceContext->makeClient("StepDownThread");
- result.txn = result.client->makeOperationContext();
+ result.opCtx = result.client->makeOperationContext();
return result;
}
std::shared_ptr<Client> client;
- std::shared_ptr<OperationContext> txn;
+ std::shared_ptr<OperationContext> opCtx;
};
std::pair<SharedClientAndOperation, stdx::future<boost::optional<Status>>> stepDown_nonBlocking(
@@ -1405,7 +1406,7 @@ protected:
[=](PromisedClientAndOperation operationPromise) -> boost::optional<Status> {
auto result = SharedClientAndOperation::make(getServiceContext());
operationPromise.set_value(result);
- return getReplCoord()->stepDown(result.txn.get(), force, waitTime, stepDownTime);
+ return getReplCoord()->stepDown(result.opCtx.get(), force, waitTime, stepDownTime);
});
auto result = task.get_future();
PromisedClientAndOperation operationPromise;
@@ -1443,9 +1444,9 @@ private:
TEST_F(ReplCoordTest, NodeReturnsBadValueWhenUpdateTermIsRunAgainstANonReplNode) {
init(ReplSettings());
ASSERT_TRUE(ReplicationCoordinator::modeNone == getReplCoord()->getReplicationMode());
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
- ASSERT_EQUALS(ErrorCodes::BadValue, getReplCoord()->updateTerm(txn.get(), 0).code());
+ ASSERT_EQUALS(ErrorCodes::BadValue, getReplCoord()->updateTerm(opCtx.get(), 0).code());
}
TEST_F(ReplCoordTest, NodeChangesTermAndStepsDownWhenAndOnlyWhenUpdateTermSuppliesAHigherTerm) {
@@ -1471,31 +1472,31 @@ TEST_F(ReplCoordTest, NodeChangesTermAndStepsDownWhenAndOnlyWhenUpdateTermSuppli
ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
simulateSuccessfulV1Election();
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
ASSERT_EQUALS(1, getReplCoord()->getTerm());
ASSERT_TRUE(getReplCoord()->getMemberState().primary());
// lower term, no change
- ASSERT_OK(getReplCoord()->updateTerm(txn.get(), 0));
+ ASSERT_OK(getReplCoord()->updateTerm(opCtx.get(), 0));
ASSERT_EQUALS(1, getReplCoord()->getTerm());
ASSERT_TRUE(getReplCoord()->getMemberState().primary());
// same term, no change
- ASSERT_OK(getReplCoord()->updateTerm(txn.get(), 1));
+ ASSERT_OK(getReplCoord()->updateTerm(opCtx.get(), 1));
ASSERT_EQUALS(1, getReplCoord()->getTerm());
ASSERT_TRUE(getReplCoord()->getMemberState().primary());
// higher term, step down and change term
executor::TaskExecutor::CallbackHandle cbHandle;
- ASSERT_EQUALS(ErrorCodes::StaleTerm, getReplCoord()->updateTerm(txn.get(), 2).code());
+ ASSERT_EQUALS(ErrorCodes::StaleTerm, getReplCoord()->updateTerm(opCtx.get(), 2).code());
// Term hasn't been incremented yet, as we need another try to update it after stepdown.
ASSERT_EQUALS(1, getReplCoord()->getTerm());
ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
// Now update term should actually update the term, as stepdown is complete.
- ASSERT_EQUALS(ErrorCodes::StaleTerm, getReplCoord()->updateTerm(txn.get(), 2).code());
+ ASSERT_EQUALS(ErrorCodes::StaleTerm, getReplCoord()->updateTerm(opCtx.get(), 2).code());
ASSERT_EQUALS(2, getReplCoord()->getTerm());
}
@@ -1576,7 +1577,7 @@ TEST_F(ReplCoordTest, ConcurrentStepDownShouldNotSignalTheSameFinishEventMoreTha
}
TEST_F(StepDownTest, NodeReturnsNotMasterWhenAskedToStepDownAsANonPrimaryNode) {
- const auto txn = makeOperationContext();
+ const auto opCtx = makeOperationContext();
OpTimeWithTermOne optime1(100, 1);
// All nodes are caught up
@@ -1585,7 +1586,7 @@ TEST_F(StepDownTest, NodeReturnsNotMasterWhenAskedToStepDownAsANonPrimaryNode) {
ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(1, 1, optime1));
ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(1, 2, optime1));
- Status status = getReplCoord()->stepDown(txn.get(), false, Milliseconds(0), Milliseconds(0));
+ Status status = getReplCoord()->stepDown(opCtx.get(), false, Milliseconds(0), Milliseconds(0));
ASSERT_EQUALS(ErrorCodes::NotMaster, status);
ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
}
@@ -1601,12 +1602,13 @@ TEST_F(StepDownTest,
simulateSuccessfulV1Election();
- const auto txn = makeOperationContext();
+ const auto opCtx = makeOperationContext();
// Make sure stepDown cannot grab the global shared lock
- Lock::GlobalWrite lk(txn->lockState());
+ Lock::GlobalWrite lk(opCtx->lockState());
- Status status = getReplCoord()->stepDown(txn.get(), false, Milliseconds(0), Milliseconds(1000));
+ Status status =
+ getReplCoord()->stepDown(opCtx.get(), false, Milliseconds(0), Milliseconds(1000));
ASSERT_EQUALS(ErrorCodes::ExceededTimeLimit, status);
ASSERT_TRUE(getReplCoord()->getMemberState().primary());
}
@@ -1710,10 +1712,10 @@ TEST_F(
getNet()->runReadyNetworkOperations();
exitNetwork();
- const auto txn = makeOperationContext();
+ const auto opCtx = makeOperationContext();
ASSERT_TRUE(getReplCoord()->getMemberState().primary());
- auto status = getReplCoord()->stepDown(txn.get(), false, Milliseconds(0), Milliseconds(1000));
+ auto status = getReplCoord()->stepDown(opCtx.get(), false, Milliseconds(0), Milliseconds(1000));
ASSERT_EQUALS(ErrorCodes::ExceededTimeLimit, status);
ASSERT_TRUE(getReplCoord()->getMemberState().primary());
}
@@ -1744,10 +1746,10 @@ TEST_F(StepDownTestFiveNode,
getNet()->runReadyNetworkOperations();
exitNetwork();
- const auto txn = makeOperationContext();
+ const auto opCtx = makeOperationContext();
ASSERT_TRUE(getReplCoord()->getMemberState().primary());
- auto status = getReplCoord()->stepDown(txn.get(), false, Milliseconds(0), Milliseconds(1000));
+ auto status = getReplCoord()->stepDown(opCtx.get(), false, Milliseconds(0), Milliseconds(1000));
ASSERT_EQUALS(ErrorCodes::ExceededTimeLimit, status);
ASSERT_TRUE(getReplCoord()->getMemberState().primary());
}
@@ -1779,10 +1781,10 @@ TEST_F(
getNet()->runReadyNetworkOperations();
exitNetwork();
- const auto txn = makeOperationContext();
+ const auto opCtx = makeOperationContext();
ASSERT_TRUE(getReplCoord()->getMemberState().primary());
- ASSERT_OK(getReplCoord()->stepDown(txn.get(), false, Milliseconds(0), Milliseconds(1000)));
+ ASSERT_OK(getReplCoord()->stepDown(opCtx.get(), false, Milliseconds(0), Milliseconds(1000)));
enterNetwork(); // So we can safely inspect the topology coordinator
ASSERT_EQUALS(getNet()->now() + Seconds(1), getTopoCoord().getStepDownTime());
ASSERT_TRUE(getTopoCoord().getMemberState().secondary());
@@ -1802,9 +1804,9 @@ TEST_F(ReplCoordTest, NodeBecomesPrimaryAgainWhenStepDownTimeoutExpiresInASingle
<< "test1:1234"))),
HostAndPort("test1", 1234));
runSingleNodeElection(makeOperationContext(), getReplCoord(), getNet());
- const auto txn = makeOperationContext();
+ const auto opCtx = makeOperationContext();
- ASSERT_OK(getReplCoord()->stepDown(txn.get(), true, Milliseconds(0), Milliseconds(1000)));
+ ASSERT_OK(getReplCoord()->stepDown(opCtx.get(), true, Milliseconds(0), Milliseconds(1000)));
getNet()->enterNetwork(); // Must do this before inspecting the topocoord
Date_t stepdownUntil = getNet()->now() + Seconds(1);
ASSERT_EQUALS(stepdownUntil, getTopoCoord().getStepDownTime());
@@ -1833,10 +1835,10 @@ TEST_F(StepDownTest,
simulateSuccessfulV1Election();
- const auto txn = makeOperationContext();
+ const auto opCtx = makeOperationContext();
// Try to stepDown but time out because no secondaries are caught up.
- auto status = repl->stepDown(txn.get(), false, Milliseconds(0), Milliseconds(1000));
+ auto status = repl->stepDown(opCtx.get(), false, Milliseconds(0), Milliseconds(1000));
ASSERT_EQUALS(ErrorCodes::ExceededTimeLimit, status);
ASSERT_TRUE(repl->getMemberState().primary());
@@ -1851,7 +1853,7 @@ TEST_F(StepDownTest,
}
getNet()->exitNetwork();
ASSERT_TRUE(repl->getMemberState().primary());
- status = repl->stepDown(txn.get(), true, Milliseconds(0), Milliseconds(1000));
+ status = repl->stepDown(opCtx.get(), true, Milliseconds(0), Milliseconds(1000));
ASSERT_OK(status);
ASSERT_TRUE(repl->getMemberState().secondary());
}
@@ -2004,7 +2006,7 @@ TEST_F(StepDownTest, NodeReturnsInterruptedWhenInterruptedDuringStepDown) {
// stepDown where the secondary actually has to catch up before the stepDown can succeed.
auto result = stepDown_nonBlocking(false, Seconds(10), Seconds(60));
- killOperation(result.first.txn.get());
+ killOperation(result.first.opCtx.get());
ASSERT_EQUALS(ErrorCodes::Interrupted, *result.second.get());
ASSERT_TRUE(repl->getMemberState().primary());
}
@@ -2347,11 +2349,11 @@ TEST_F(ReplCoordTest, DoNotAllowMaintenanceModeWhilePrimary) {
ASSERT_EQUALS(ErrorCodes::NotSecondary, status);
ASSERT_TRUE(getReplCoord()->getMemberState().primary());
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
// Step down from primary.
- getReplCoord()->updateTerm(txn.get(), getReplCoord()->getTerm() + 1);
+ getReplCoord()->updateTerm(opCtx.get(), getReplCoord()->getTerm() + 1);
ASSERT_OK(getReplCoord()->waitForMemberState(MemberState::RS_SECONDARY, Seconds(1)));
status = getReplCoord()->setMaintenanceMode(false);
@@ -2383,11 +2385,11 @@ TEST_F(ReplCoordTest, DoNotAllowSettingMaintenanceModeWhileConductingAnElection)
// TODO this election shouldn't have to happen.
simulateSuccessfulV1Election();
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
// Step down from primary.
- getReplCoord()->updateTerm(txn.get(), getReplCoord()->getTerm() + 1);
+ getReplCoord()->updateTerm(opCtx.get(), getReplCoord()->getTerm() + 1);
getReplCoord()->waitForMemberState(MemberState::RS_SECONDARY, Milliseconds(10 * 1000));
// Can't modify maintenance mode when running for election (before and after dry run).
@@ -2514,7 +2516,7 @@ TEST_F(ReplCoordTest, NodeDoesNotIncludeItselfWhenRunningGetHostsWrittenToInMast
settings.setMaster(true);
init(settings);
HostAndPort clientHost("node2:12345");
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
OID client = OID::gen();
@@ -2524,7 +2526,7 @@ TEST_F(ReplCoordTest, NodeDoesNotIncludeItselfWhenRunningGetHostsWrittenToInMast
getExternalState()->setClientHostAndPort(clientHost);
HandshakeArgs handshake;
ASSERT_OK(handshake.initialize(BSON("handshake" << client)));
- ASSERT_OK(getReplCoord()->processHandshake(txn.get(), handshake));
+ ASSERT_OK(getReplCoord()->processHandshake(opCtx.get(), handshake));
getReplCoord()->setMyLastAppliedOpTime(time2);
getReplCoord()->setMyLastDurableOpTime(time2);
@@ -2675,11 +2677,11 @@ TEST_F(ReplCoordTest, IsMasterWithCommittedSnapshot) {
time_t majorityWriteDate = 100;
OpTime majorityOpTime = OpTime(Timestamp(majorityWriteDate, 1), 1);
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
getReplCoord()->setMyLastAppliedOpTime(opTime);
getReplCoord()->setMyLastDurableOpTime(opTime);
- getReplCoord()->createSnapshot(txn.get(), majorityOpTime, SnapshotName(1));
+ getReplCoord()->createSnapshot(opCtx.get(), majorityOpTime, SnapshotName(1));
ASSERT_EQUALS(majorityOpTime, getReplCoord()->getCurrentCommittedSnapshotOpTime());
IsMasterResponse response;
@@ -2695,8 +2697,8 @@ TEST_F(ReplCoordTest, LogAMessageWhenShutDownBeforeReplicationStartUpFinished) {
init();
startCapturingLogMessages();
{
- auto txn = makeOperationContext();
- getReplCoord()->shutdown(txn.get());
+ auto opCtx = makeOperationContext();
+ getReplCoord()->shutdown(opCtx.get());
}
stopCapturingLogMessages();
ASSERT_EQUALS(1, countLogLinesContaining("shutdown() called before startup() finished"));
@@ -2735,11 +2737,11 @@ TEST_F(ReplCoordTest, DoNotProcessSelfWhenUpdatePositionContainsInfoAboutSelf) {
writeConcern.wTimeout = WriteConcernOptions::kNoWaiting;
writeConcern.wNumNodes = 1;
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
- getReplCoord()->awaitReplication(txn.get(), time2, writeConcern).status);
+ getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern).status);
// receive updatePosition containing ourself, should not process the update for self
UpdatePositionArgs args;
@@ -2757,7 +2759,7 @@ TEST_F(ReplCoordTest, DoNotProcessSelfWhenUpdatePositionContainsInfoAboutSelf) {
ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args, 0));
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
- getReplCoord()->awaitReplication(txn.get(), time2, writeConcern).status);
+ getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern).status);
}
TEST_F(ReplCoordTest, DoNotProcessSelfWhenOldUpdatePositionContainsInfoAboutSelf) {
@@ -2794,11 +2796,11 @@ TEST_F(ReplCoordTest, DoNotProcessSelfWhenOldUpdatePositionContainsInfoAboutSelf
writeConcern.wTimeout = WriteConcernOptions::kNoWaiting;
writeConcern.wNumNodes = 1;
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
- getReplCoord()->awaitReplication(txn.get(), time2, writeConcern).status);
+ getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern).status);
// receive updatePosition containing ourself, should not process the update for self
OldUpdatePositionArgs args;
@@ -2814,7 +2816,7 @@ TEST_F(ReplCoordTest, DoNotProcessSelfWhenOldUpdatePositionContainsInfoAboutSelf
ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args, 0));
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
- getReplCoord()->awaitReplication(txn.get(), time2, writeConcern).status);
+ getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern).status);
}
TEST_F(ReplCoordTest, DoNotProcessUpdatePositionWhenItsConfigVersionIsIncorrect) {
@@ -2864,14 +2866,14 @@ TEST_F(ReplCoordTest, DoNotProcessUpdatePositionWhenItsConfigVersionIsIncorrect)
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< time2.toBSON())))));
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
long long cfgver;
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
getReplCoord()->processReplSetUpdatePosition(args, &cfgver));
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
- getReplCoord()->awaitReplication(txn.get(), time2, writeConcern).status);
+ getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern).status);
}
TEST_F(ReplCoordTest, DoNotProcessOldUpdatePositionWhenItsConfigVersionIsIncorrect) {
@@ -2920,14 +2922,14 @@ TEST_F(ReplCoordTest, DoNotProcessOldUpdatePositionWhenItsConfigVersionIsIncorre
<< OldUpdatePositionArgs::kOpTimeFieldName
<< time2.timestamp)))));
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
long long cfgver;
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
getReplCoord()->processReplSetUpdatePosition(args, &cfgver));
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
- getReplCoord()->awaitReplication(txn.get(), time2, writeConcern).status);
+ getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern).status);
}
TEST_F(ReplCoordTest, DoNotProcessUpdatePositionOfMembersWhoseIdsAreNotInTheConfig) {
@@ -2977,12 +2979,12 @@ TEST_F(ReplCoordTest, DoNotProcessUpdatePositionOfMembersWhoseIdsAreNotInTheConf
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< time2.toBSON())))));
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
ASSERT_EQUALS(ErrorCodes::NodeNotFound, getReplCoord()->processReplSetUpdatePosition(args, 0));
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
- getReplCoord()->awaitReplication(txn.get(), time2, writeConcern).status);
+ getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern).status);
}
TEST_F(ReplCoordTest, DoNotProcessOldUpdatePositionOfMembersWhoseIdsAreNotInTheConfig) {
@@ -3031,12 +3033,12 @@ TEST_F(ReplCoordTest, DoNotProcessOldUpdatePositionOfMembersWhoseIdsAreNotInTheC
<< OldUpdatePositionArgs::kOpTimeFieldName
<< time2.timestamp)))));
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
ASSERT_EQUALS(ErrorCodes::NodeNotFound, getReplCoord()->processReplSetUpdatePosition(args, 0));
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
- getReplCoord()->awaitReplication(txn.get(), time2, writeConcern).status);
+ getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern).status);
}
TEST_F(ReplCoordTest,
@@ -3095,19 +3097,19 @@ TEST_F(ReplCoordTest,
<< OldUpdatePositionArgs::kOpTimeFieldName
<< time2.timestamp)))));
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args, 0));
- ASSERT_OK(getReplCoord()->awaitReplication(txn.get(), time2, writeConcern).status);
+ ASSERT_OK(getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern).status);
writeConcern.wNumNodes = 3;
- ASSERT_OK(getReplCoord()->awaitReplication(txn.get(), time2, writeConcern).status);
+ ASSERT_OK(getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern).status);
}
void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord, Status* status) {
auto client = getGlobalServiceContext()->makeClient("rsr");
- auto txn = client->makeOperationContext();
+ auto opCtx = client->makeOperationContext();
BSONObjBuilder garbage;
ReplSetReconfigArgs args;
@@ -3125,7 +3127,7 @@ void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord, Status* status) {
<< "node2:12345")
<< BSON("_id" << 2 << "host"
<< "node3:12345")));
- *status = replCoord->processReplSetReconfig(txn.get(), args, &garbage);
+ *status = replCoord->processReplSetReconfig(opCtx.get(), args, &garbage);
}
TEST_F(ReplCoordTest, AwaitReplicationShouldResolveAsNormalDuringAReconfig) {
@@ -3205,7 +3207,7 @@ TEST_F(ReplCoordTest, AwaitReplicationShouldResolveAsNormalDuringAReconfig) {
void doReplSetReconfigToFewer(ReplicationCoordinatorImpl* replCoord, Status* status) {
auto client = getGlobalServiceContext()->makeClient("rsr");
- auto txn = client->makeOperationContext();
+ auto opCtx = client->makeOperationContext();
BSONObjBuilder garbage;
ReplSetReconfigArgs args;
@@ -3219,7 +3221,7 @@ void doReplSetReconfigToFewer(ReplicationCoordinatorImpl* replCoord, Status* sta
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node3:12345")));
- *status = replCoord->processReplSetReconfig(txn.get(), args, &garbage);
+ *status = replCoord->processReplSetReconfig(opCtx.get(), args, &garbage);
}
TEST_F(
@@ -3323,11 +3325,11 @@ TEST_F(ReplCoordTest,
simulateSuccessfulV1Election();
OpTime time(Timestamp(100, 2), 1);
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
getReplCoord()->setMyLastAppliedOpTime(time);
getReplCoord()->setMyLastDurableOpTime(time);
- getReplCoord()->createSnapshot(txn.get(), time, SnapshotName(1));
+ getReplCoord()->createSnapshot(opCtx.get(), time, SnapshotName(1));
ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 1, time));
@@ -3350,7 +3352,7 @@ TEST_F(ReplCoordTest,
writeConcern.syncMode = WriteConcernOptions::SyncMode::NONE;
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
- getReplCoord()->awaitReplication(txn.get(), time, writeConcern2).status);
+ getReplCoord()->awaitReplication(opCtx.get(), time, writeConcern2).status);
// reconfig to three nodes
Status status(ErrorCodes::InternalError, "Not Set");
@@ -3413,30 +3415,30 @@ TEST_F(ReplCoordTest,
majorityWriteConcern.wMode = WriteConcernOptions::kMajority;
majorityWriteConcern.syncMode = WriteConcernOptions::SyncMode::JOURNAL;
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
- getReplCoord()->awaitReplication(txn.get(), time, majorityWriteConcern).status);
+ getReplCoord()->awaitReplication(opCtx.get(), time, majorityWriteConcern).status);
ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 1, time));
ASSERT_OK(getReplCoord()->setLastDurableOptime_forTest(2, 1, time));
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
- getReplCoord()->awaitReplication(txn.get(), time, majorityWriteConcern).status);
+ getReplCoord()->awaitReplication(opCtx.get(), time, majorityWriteConcern).status);
// this member does not vote and as a result should not count towards write concern
ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 3, time));
ASSERT_OK(getReplCoord()->setLastDurableOptime_forTest(2, 3, time));
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
- getReplCoord()->awaitReplication(txn.get(), time, majorityWriteConcern).status);
+ getReplCoord()->awaitReplication(opCtx.get(), time, majorityWriteConcern).status);
ASSERT_OK(getReplCoord()->setLastAppliedOptime_forTest(2, 2, time));
ASSERT_OK(getReplCoord()->setLastDurableOptime_forTest(2, 2, time));
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
- getReplCoord()->awaitReplication(txn.get(), time, majorityWriteConcern).status);
+ getReplCoord()->awaitReplication(opCtx.get(), time, majorityWriteConcern).status);
- getReplCoord()->createSnapshot(txn.get(), time, SnapshotName(1));
- ASSERT_OK(getReplCoord()->awaitReplication(txn.get(), time, majorityWriteConcern).status);
+ getReplCoord()->createSnapshot(opCtx.get(), time, SnapshotName(1));
+ ASSERT_OK(getReplCoord()->awaitReplication(opCtx.get(), time, majorityWriteConcern).status);
}
TEST_F(ReplCoordTest,
@@ -3527,12 +3529,13 @@ TEST_F(ReplCoordTest, NodeReturnsShutdownInProgressWhenWaitingUntilAnOpTimeDurin
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermOne(10, 0));
getReplCoord()->setMyLastDurableOpTime(OpTimeWithTermOne(10, 0));
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
- shutdown(txn.get());
+ shutdown(opCtx.get());
auto status = getReplCoord()->waitUntilOpTimeForRead(
- txn.get(), ReadConcernArgs(OpTimeWithTermOne(50, 0), ReadConcernLevel::kLocalReadConcern));
+ opCtx.get(),
+ ReadConcernArgs(OpTimeWithTermOne(50, 0), ReadConcernLevel::kLocalReadConcern));
ASSERT_EQ(status, ErrorCodes::ShutdownInProgress);
}
@@ -3551,11 +3554,12 @@ TEST_F(ReplCoordTest, NodeReturnsInterruptedWhenWaitingUntilAnOpTimeIsInterrupte
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermOne(10, 0));
getReplCoord()->setMyLastDurableOpTime(OpTimeWithTermOne(10, 0));
- const auto txn = makeOperationContext();
- killOperation(txn.get());
+ const auto opCtx = makeOperationContext();
+ killOperation(opCtx.get());
auto status = getReplCoord()->waitUntilOpTimeForRead(
- txn.get(), ReadConcernArgs(OpTimeWithTermOne(50, 0), ReadConcernLevel::kLocalReadConcern));
+ opCtx.get(),
+ ReadConcernArgs(OpTimeWithTermOne(50, 0), ReadConcernLevel::kLocalReadConcern));
ASSERT_EQ(status, ErrorCodes::Interrupted);
}
@@ -3571,9 +3575,9 @@ TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesNoOpTi
<< 0))),
HostAndPort("node1", 12345));
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
- ASSERT_OK(getReplCoord()->waitUntilOpTimeForRead(txn.get(), ReadConcernArgs()));
+ ASSERT_OK(getReplCoord()->waitUntilOpTimeForRead(opCtx.get(), ReadConcernArgs()));
}
TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesAnOpTimePriorToOurLast) {
@@ -3591,10 +3595,11 @@ TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesAnOpTi
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermOne(100, 0));
getReplCoord()->setMyLastDurableOpTime(OpTimeWithTermOne(100, 0));
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
ASSERT_OK(getReplCoord()->waitUntilOpTimeForRead(
- txn.get(), ReadConcernArgs(OpTimeWithTermOne(50, 0), ReadConcernLevel::kLocalReadConcern)));
+ opCtx.get(),
+ ReadConcernArgs(OpTimeWithTermOne(50, 0), ReadConcernLevel::kLocalReadConcern)));
}
TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesAnOpTimeEqualToOurLast) {
@@ -3614,20 +3619,21 @@ TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesAnOpTi
getReplCoord()->setMyLastAppliedOpTime(time);
getReplCoord()->setMyLastDurableOpTime(time);
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
ASSERT_OK(getReplCoord()->waitUntilOpTimeForRead(
- txn.get(), ReadConcernArgs(time, ReadConcernLevel::kLocalReadConcern)));
+ opCtx.get(), ReadConcernArgs(time, ReadConcernLevel::kLocalReadConcern)));
}
TEST_F(ReplCoordTest,
NodeReturnsNotAReplicaSetWhenWaitUntilOpTimeIsRunWithoutMajorityReadConcernEnabled) {
init(ReplSettings());
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
auto status = getReplCoord()->waitUntilOpTimeForRead(
- txn.get(), ReadConcernArgs(OpTimeWithTermOne(50, 0), ReadConcernLevel::kLocalReadConcern));
+ opCtx.get(),
+ ReadConcernArgs(OpTimeWithTermOne(50, 0), ReadConcernLevel::kLocalReadConcern));
ASSERT_EQ(status, ErrorCodes::NotAReplicaSet);
}
@@ -3636,10 +3642,10 @@ TEST_F(ReplCoordTest, NodeReturnsNotAReplicaSetWhenWaitUntilOpTimeIsRunAgainstAS
settings.setMajorityReadConcernEnabled(true);
init(settings);
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
auto status = getReplCoord()->waitUntilOpTimeForRead(
- txn.get(),
+ opCtx.get(),
ReadConcernArgs(OpTime(Timestamp(50, 0), 0), ReadConcernLevel::kMajorityReadConcern));
ASSERT_EQ(status, ErrorCodes::NotAReplicaSet);
}
@@ -3662,11 +3668,11 @@ TEST_F(ReplCoordTest, ReadAfterCommittedWhileShutdown) {
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(10, 0), 0));
getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(10, 0), 0));
- auto txn = makeOperationContext();
- shutdown(txn.get());
+ auto opCtx = makeOperationContext();
+ shutdown(opCtx.get());
auto status = getReplCoord()->waitUntilOpTimeForRead(
- txn.get(),
+ opCtx.get(),
ReadConcernArgs(OpTime(Timestamp(50, 0), 0), ReadConcernLevel::kMajorityReadConcern));
ASSERT_EQUALS(status, ErrorCodes::ShutdownInProgress);
}
@@ -3683,13 +3689,13 @@ TEST_F(ReplCoordTest, ReadAfterCommittedInterrupted) {
<< 0))),
HostAndPort("node1", 12345));
runSingleNodeElection(makeOperationContext(), getReplCoord(), getNet());
- const auto txn = makeOperationContext();
+ const auto opCtx = makeOperationContext();
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(10, 0), 0));
getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(10, 0), 0));
- killOperation(txn.get());
+ killOperation(opCtx.get());
auto status = getReplCoord()->waitUntilOpTimeForRead(
- txn.get(),
+ opCtx.get(),
ReadConcernArgs(OpTime(Timestamp(50, 0), 0), ReadConcernLevel::kMajorityReadConcern));
ASSERT_EQUALS(status, ErrorCodes::Interrupted);
}
@@ -3707,13 +3713,13 @@ TEST_F(ReplCoordTest, ReadAfterCommittedGreaterOpTime) {
HostAndPort("node1", 12345));
runSingleNodeElection(makeOperationContext(), getReplCoord(), getNet());
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 0), 1));
getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(100, 0), 1));
- getReplCoord()->createSnapshot(txn.get(), OpTime(Timestamp(100, 0), 1), SnapshotName(1));
+ getReplCoord()->createSnapshot(opCtx.get(), OpTime(Timestamp(100, 0), 1), SnapshotName(1));
ASSERT_OK(getReplCoord()->waitUntilOpTimeForRead(
- txn.get(),
+ opCtx.get(),
ReadConcernArgs(OpTime(Timestamp(50, 0), 1), ReadConcernLevel::kMajorityReadConcern)));
}
@@ -3729,15 +3735,15 @@ TEST_F(ReplCoordTest, ReadAfterCommittedEqualOpTime) {
<< 0))),
HostAndPort("node1", 12345));
runSingleNodeElection(makeOperationContext(), getReplCoord(), getNet());
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
OpTime time(Timestamp(100, 0), 1);
getReplCoord()->setMyLastAppliedOpTime(time);
getReplCoord()->setMyLastDurableOpTime(time);
- getReplCoord()->createSnapshot(txn.get(), time, SnapshotName(1));
+ getReplCoord()->createSnapshot(opCtx.get(), time, SnapshotName(1));
ASSERT_OK(getReplCoord()->waitUntilOpTimeForRead(
- txn.get(), ReadConcernArgs(time, ReadConcernLevel::kMajorityReadConcern)));
+ opCtx.get(), ReadConcernArgs(time, ReadConcernLevel::kMajorityReadConcern)));
}
TEST_F(ReplCoordTest, ReadAfterCommittedDeferredGreaterOpTime) {
@@ -3762,10 +3768,10 @@ TEST_F(ReplCoordTest, ReadAfterCommittedDeferredGreaterOpTime) {
getReplCoord()->createSnapshot(nullptr, committedOpTime, SnapshotName(1));
});
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
ASSERT_OK(getReplCoord()->waitUntilOpTimeForRead(
- txn.get(),
+ opCtx.get(),
ReadConcernArgs(OpTime(Timestamp(100, 0), 1), ReadConcernLevel::kMajorityReadConcern)));
}
@@ -3793,10 +3799,10 @@ TEST_F(ReplCoordTest, ReadAfterCommittedDeferredEqualOpTime) {
getReplCoord()->createSnapshot(nullptr, opTimeToWait, SnapshotName(1));
});
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
ASSERT_OK(getReplCoord()->waitUntilOpTimeForRead(
- txn.get(), ReadConcernArgs(opTimeToWait, ReadConcernLevel::kMajorityReadConcern)));
+ opCtx.get(), ReadConcernArgs(opTimeToWait, ReadConcernLevel::kMajorityReadConcern)));
pseudoLogOp.get();
}
@@ -3880,13 +3886,13 @@ TEST_F(ReplCoordTest, UpdateLastCommittedOpTimeWhenTheLastCommittedOpTimeIsNewer
HostAndPort("node1", 12345));
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
- auto txn = makeOperationContext();
- getReplCoord()->updateTerm(txn.get(), 1);
+ auto opCtx = makeOperationContext();
+ getReplCoord()->updateTerm(opCtx.get(), 1);
ASSERT_EQUALS(1, getReplCoord()->getTerm());
OpTime time(Timestamp(10, 0), 1);
OpTime oldTime(Timestamp(9, 0), 1);
- getReplCoord()->createSnapshot(txn.get(), time, SnapshotName(1));
+ getReplCoord()->createSnapshot(opCtx.get(), time, SnapshotName(1));
// higher OpTime, should change
getReplCoord()->advanceCommitPoint(time);
@@ -3922,8 +3928,8 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr
<< 1),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
- auto txn = makeOperationContext();
- getReplCoord()->updateTerm(txn.get(), 1);
+ auto opCtx = makeOperationContext();
+ getReplCoord()->updateTerm(opCtx.get(), 1);
ASSERT_EQUALS(1, getReplCoord()->getTerm());
// higher term, should change
@@ -4001,8 +4007,8 @@ TEST_F(ReplCoordTest,
<< 1),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
- auto txn = makeOperationContext();
- getReplCoord()->updateTerm(txn.get(), 1);
+ auto opCtx = makeOperationContext();
+ getReplCoord()->updateTerm(opCtx.get(), 1);
ASSERT_EQUALS(1, getReplCoord()->getTerm());
auto replCoord = getReplCoord();
@@ -4129,8 +4135,8 @@ TEST_F(ReplCoordTest, TermAndLastCommittedOpTimeUpdatedFromHeartbeatWhenArbiter)
<< 1),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
- auto txn = makeOperationContext();
- getReplCoord()->updateTerm(txn.get(), 1);
+ auto opCtx = makeOperationContext();
+ getReplCoord()->updateTerm(opCtx.get(), 1);
ASSERT_EQUALS(1, getReplCoord()->getTerm());
auto replCoord = getReplCoord();
@@ -4555,11 +4561,11 @@ TEST_F(ReplCoordTest, AdvanceCommittedSnapshotToMostRecentSnapshotPriorToOpTimeW
OpTime time4(Timestamp(100, 4), 1);
OpTime time5(Timestamp(100, 5), 1);
OpTime time6(Timestamp(100, 6), 1);
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
- getReplCoord()->createSnapshot(txn.get(), time1, SnapshotName(1));
- getReplCoord()->createSnapshot(txn.get(), time2, SnapshotName(2));
- getReplCoord()->createSnapshot(txn.get(), time5, SnapshotName(3));
+ getReplCoord()->createSnapshot(opCtx.get(), time1, SnapshotName(1));
+ getReplCoord()->createSnapshot(opCtx.get(), time2, SnapshotName(2));
+ getReplCoord()->createSnapshot(opCtx.get(), time5, SnapshotName(3));
// ensure current snapshot follows price is right rules (closest but not greater than)
getReplCoord()->setMyLastAppliedOpTime(time3);
@@ -4589,11 +4595,11 @@ TEST_F(ReplCoordTest, DoNotAdvanceCommittedSnapshotWhenAnOpTimeIsNewerThanOurLat
OpTime time4(Timestamp(100, 4), 1);
OpTime time5(Timestamp(100, 5), 1);
OpTime time6(Timestamp(100, 6), 1);
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
- getReplCoord()->createSnapshot(txn.get(), time1, SnapshotName(1));
- getReplCoord()->createSnapshot(txn.get(), time2, SnapshotName(2));
- getReplCoord()->createSnapshot(txn.get(), time5, SnapshotName(3));
+ getReplCoord()->createSnapshot(opCtx.get(), time1, SnapshotName(1));
+ getReplCoord()->createSnapshot(opCtx.get(), time2, SnapshotName(2));
+ getReplCoord()->createSnapshot(opCtx.get(), time5, SnapshotName(3));
// ensure current snapshot will not advance beyond existing snapshots
getReplCoord()->setMyLastAppliedOpTime(time6);
@@ -4621,18 +4627,18 @@ TEST_F(ReplCoordTest,
OpTime time4(Timestamp(100, 4), 1);
OpTime time5(Timestamp(100, 5), 1);
OpTime time6(Timestamp(100, 6), 1);
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
- getReplCoord()->createSnapshot(txn.get(), time1, SnapshotName(1));
- getReplCoord()->createSnapshot(txn.get(), time2, SnapshotName(2));
- getReplCoord()->createSnapshot(txn.get(), time5, SnapshotName(3));
+ getReplCoord()->createSnapshot(opCtx.get(), time1, SnapshotName(1));
+ getReplCoord()->createSnapshot(opCtx.get(), time2, SnapshotName(2));
+ getReplCoord()->createSnapshot(opCtx.get(), time5, SnapshotName(3));
getReplCoord()->setMyLastAppliedOpTime(time6);
getReplCoord()->setMyLastDurableOpTime(time6);
ASSERT_EQUALS(time5, getReplCoord()->getCurrentCommittedSnapshotOpTime());
// ensure current snapshot updates on new snapshot if we are that far
- getReplCoord()->createSnapshot(txn.get(), time6, SnapshotName(4));
+ getReplCoord()->createSnapshot(opCtx.get(), time6, SnapshotName(4));
ASSERT_EQUALS(time6, getReplCoord()->getCurrentCommittedSnapshotOpTime());
}
@@ -4655,11 +4661,11 @@ TEST_F(ReplCoordTest, ZeroCommittedSnapshotWhenAllSnapshotsAreDropped) {
OpTime time4(Timestamp(100, 4), 1);
OpTime time5(Timestamp(100, 5), 1);
OpTime time6(Timestamp(100, 6), 1);
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
- getReplCoord()->createSnapshot(txn.get(), time1, SnapshotName(1));
- getReplCoord()->createSnapshot(txn.get(), time2, SnapshotName(2));
- getReplCoord()->createSnapshot(txn.get(), time5, SnapshotName(3));
+ getReplCoord()->createSnapshot(opCtx.get(), time1, SnapshotName(1));
+ getReplCoord()->createSnapshot(opCtx.get(), time2, SnapshotName(2));
+ getReplCoord()->createSnapshot(opCtx.get(), time5, SnapshotName(3));
// ensure dropping all snapshots should reset the current committed snapshot
getReplCoord()->dropAllSnapshots();
@@ -4681,9 +4687,9 @@ TEST_F(ReplCoordTest, DoNotAdvanceCommittedSnapshotWhenAppliedOpTimeChanges) {
OpTime time1(Timestamp(100, 1), 1);
OpTime time2(Timestamp(100, 2), 1);
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
- getReplCoord()->createSnapshot(txn.get(), time1, SnapshotName(1));
+ getReplCoord()->createSnapshot(opCtx.get(), time1, SnapshotName(1));
getReplCoord()->setMyLastAppliedOpTime(time1);
ASSERT_EQUALS(OpTime(), getReplCoord()->getCurrentCommittedSnapshotOpTime());
@@ -4842,13 +4848,13 @@ TEST_F(ReplCoordTest, NewStyleUpdatePositionCmdHasMetadata) {
OpTime optime(Timestamp(100, 2), 0);
getReplCoord()->setMyLastAppliedOpTime(optime);
getReplCoord()->setMyLastDurableOpTime(optime);
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
// Set last committed optime via metadata.
rpc::ReplSetMetadata syncSourceMetadata(optime.getTerm(), optime, optime, 1, OID(), -1, 1);
getReplCoord()->processReplSetMetadata(syncSourceMetadata);
getReplCoord()->advanceCommitPoint(optime);
- getReplCoord()->createSnapshot(txn.get(), optime, SnapshotName(1));
+ getReplCoord()->createSnapshot(opCtx.get(), optime, SnapshotName(1));
BSONObj cmd = unittest::assertGet(getReplCoord()->prepareReplSetUpdatePositionCommand(
ReplicationCoordinator::ReplSetUpdatePositionCommandStyle::kNewStyle));
@@ -5070,8 +5076,8 @@ TEST_F(ReplCoordTest, WaitForDrainFinish) {
ASSERT_EQUALS(ErrorCodes::BadValue, replCoord->waitForDrainFinish(Milliseconds(-1)));
- const auto txn = makeOperationContext();
- replCoord->signalDrainComplete(txn.get(), replCoord->getTerm());
+ const auto opCtx = makeOperationContext();
+ replCoord->signalDrainComplete(opCtx.get(), replCoord->getTerm());
ASSERT_OK(replCoord->waitForDrainFinish(timeout));
// Zero timeout is fine.
@@ -5346,7 +5352,7 @@ TEST_F(ReplCoordTest, NodeStoresElectionVotes) {
getReplCoord()->setMyLastDurableOpTime(time);
simulateSuccessfulV1Election();
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
ReplSetRequestVotesArgs args;
ASSERT_OK(args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
@@ -5363,11 +5369,11 @@ TEST_F(ReplCoordTest, NodeStoresElectionVotes) {
<< time.asOpTime().toBSON())));
ReplSetRequestVotesResponse response;
- ASSERT_OK(getReplCoord()->processReplSetRequestVotes(txn.get(), args, &response));
+ ASSERT_OK(getReplCoord()->processReplSetRequestVotes(opCtx.get(), args, &response));
ASSERT_EQUALS("", response.getReason());
ASSERT_TRUE(response.getVoteGranted());
- auto lastVote = getExternalState()->loadLocalLastVoteDocument(txn.get());
+ auto lastVote = getExternalState()->loadLocalLastVoteDocument(opCtx.get());
ASSERT_OK(lastVote.getStatus());
// This is not a dry-run election so the last vote should include the new term and candidate.
@@ -5400,7 +5406,7 @@ TEST_F(ReplCoordTest, NodeDoesNotStoreDryRunVotes) {
getReplCoord()->setMyLastDurableOpTime(time);
simulateSuccessfulV1Election();
- auto txn = makeOperationContext();
+ auto opCtx = makeOperationContext();
ReplSetRequestVotesArgs args;
ASSERT_OK(args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
@@ -5417,11 +5423,11 @@ TEST_F(ReplCoordTest, NodeDoesNotStoreDryRunVotes) {
<< time.asOpTime().toBSON())));
ReplSetRequestVotesResponse response;
- ASSERT_OK(getReplCoord()->processReplSetRequestVotes(txn.get(), args, &response));
+ ASSERT_OK(getReplCoord()->processReplSetRequestVotes(opCtx.get(), args, &response));
ASSERT_EQUALS("", response.getReason());
ASSERT_TRUE(response.getVoteGranted());
- auto lastVote = getExternalState()->loadLocalLastVoteDocument(txn.get());
+ auto lastVote = getExternalState()->loadLocalLastVoteDocument(opCtx.get());
ASSERT_OK(lastVote.getStatus());
// This is a dry-run election so the last vote should not be updated with the new term and
diff --git a/src/mongo/db/repl/replication_coordinator_mock.cpp b/src/mongo/db/repl/replication_coordinator_mock.cpp
index e72083ef012..27d62c0af1e 100644
--- a/src/mongo/db/repl/replication_coordinator_mock.cpp
+++ b/src/mongo/db/repl/replication_coordinator_mock.cpp
@@ -50,7 +50,7 @@ ReplicationCoordinatorMock::ReplicationCoordinatorMock(ServiceContext* service,
ReplicationCoordinatorMock::~ReplicationCoordinatorMock() {}
-void ReplicationCoordinatorMock::startup(OperationContext* txn) {
+void ReplicationCoordinatorMock::startup(OperationContext* opCtx) {
// TODO
}
@@ -97,18 +97,18 @@ Seconds ReplicationCoordinatorMock::getSlaveDelaySecs() const {
void ReplicationCoordinatorMock::clearSyncSourceBlacklist() {}
ReplicationCoordinator::StatusAndDuration ReplicationCoordinatorMock::awaitReplication(
- OperationContext* txn, const OpTime& opTime, const WriteConcernOptions& writeConcern) {
+ OperationContext* opCtx, const OpTime& opTime, const WriteConcernOptions& writeConcern) {
// TODO
return StatusAndDuration(Status::OK(), Milliseconds(0));
}
ReplicationCoordinator::StatusAndDuration
ReplicationCoordinatorMock::awaitReplicationOfLastOpForClient(
- OperationContext* txn, const WriteConcernOptions& writeConcern) {
+ OperationContext* opCtx, const WriteConcernOptions& writeConcern) {
return StatusAndDuration(Status::OK(), Milliseconds(0));
}
-Status ReplicationCoordinatorMock::stepDown(OperationContext* txn,
+Status ReplicationCoordinatorMock::stepDown(OperationContext* opCtx,
bool force,
const Milliseconds& waitTime,
const Milliseconds& stepdownTime) {
@@ -120,7 +120,7 @@ bool ReplicationCoordinatorMock::isMasterForReportingPurposes() {
return true;
}
-bool ReplicationCoordinatorMock::canAcceptWritesForDatabase(OperationContext* txn,
+bool ReplicationCoordinatorMock::canAcceptWritesForDatabase(OperationContext* opCtx,
StringData dbName) {
// Return true if we allow writes explicitly even when not in primary state, as in sharding
// unit tests, so that the op observers can fire but the tests don't have to set all the states
@@ -131,38 +131,38 @@ bool ReplicationCoordinatorMock::canAcceptWritesForDatabase(OperationContext* tx
return dbName == "local" || _memberState.primary() || _settings.isMaster();
}
-bool ReplicationCoordinatorMock::canAcceptWritesForDatabase_UNSAFE(OperationContext* txn,
+bool ReplicationCoordinatorMock::canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx,
StringData dbName) {
- return canAcceptWritesForDatabase(txn, dbName);
+ return canAcceptWritesForDatabase(opCtx, dbName);
}
-bool ReplicationCoordinatorMock::canAcceptWritesFor(OperationContext* txn,
+bool ReplicationCoordinatorMock::canAcceptWritesFor(OperationContext* opCtx,
const NamespaceString& ns) {
// TODO
- return canAcceptWritesForDatabase(txn, ns.db());
+ return canAcceptWritesForDatabase(opCtx, ns.db());
}
-bool ReplicationCoordinatorMock::canAcceptWritesFor_UNSAFE(OperationContext* txn,
+bool ReplicationCoordinatorMock::canAcceptWritesFor_UNSAFE(OperationContext* opCtx,
const NamespaceString& ns) {
- return canAcceptWritesFor(txn, ns);
+ return canAcceptWritesFor(opCtx, ns);
}
-Status ReplicationCoordinatorMock::checkCanServeReadsFor(OperationContext* txn,
+Status ReplicationCoordinatorMock::checkCanServeReadsFor(OperationContext* opCtx,
const NamespaceString& ns,
bool slaveOk) {
// TODO
return Status::OK();
}
-Status ReplicationCoordinatorMock::checkCanServeReadsFor_UNSAFE(OperationContext* txn,
+Status ReplicationCoordinatorMock::checkCanServeReadsFor_UNSAFE(OperationContext* opCtx,
const NamespaceString& ns,
bool slaveOk) {
- return checkCanServeReadsFor(txn, ns, slaveOk);
+ return checkCanServeReadsFor(opCtx, ns, slaveOk);
}
-bool ReplicationCoordinatorMock::shouldRelaxIndexConstraints(OperationContext* txn,
+bool ReplicationCoordinatorMock::shouldRelaxIndexConstraints(OperationContext* opCtx,
const NamespaceString& ns) {
- return !canAcceptWritesFor(txn, ns);
+ return !canAcceptWritesFor(opCtx, ns);
}
Status ReplicationCoordinatorMock::setLastOptimeForSlave(const OID& rid, const Timestamp& ts) {
@@ -205,7 +205,7 @@ OpTime ReplicationCoordinatorMock::getMyLastDurableOpTime() const {
return _myLastDurableOpTime;
}
-Status ReplicationCoordinatorMock::waitUntilOpTimeForRead(OperationContext* txn,
+Status ReplicationCoordinatorMock::waitUntilOpTimeForRead(OperationContext* opCtx,
const ReadConcernArgs& settings) {
return Status::OK();
}
@@ -242,7 +242,7 @@ Status ReplicationCoordinatorMock::waitForDrainFinish(Milliseconds timeout) {
void ReplicationCoordinatorMock::signalUpstreamUpdater() {}
-Status ReplicationCoordinatorMock::resyncData(OperationContext* txn, bool waitUntilCompleted) {
+Status ReplicationCoordinatorMock::resyncData(OperationContext* opCtx, bool waitUntilCompleted) {
return Status::OK();
}
@@ -297,7 +297,7 @@ bool ReplicationCoordinatorMock::getMaintenanceMode() {
return false;
}
-Status ReplicationCoordinatorMock::processReplSetSyncFrom(OperationContext* txn,
+Status ReplicationCoordinatorMock::processReplSetSyncFrom(OperationContext* opCtx,
const HostAndPort& target,
BSONObjBuilder* resultObj) {
// TODO
@@ -314,13 +314,13 @@ Status ReplicationCoordinatorMock::processHeartbeat(const ReplSetHeartbeatArgs&
return Status::OK();
}
-Status ReplicationCoordinatorMock::processReplSetReconfig(OperationContext* txn,
+Status ReplicationCoordinatorMock::processReplSetReconfig(OperationContext* opCtx,
const ReplSetReconfigArgs& args,
BSONObjBuilder* resultObj) {
return Status::OK();
}
-Status ReplicationCoordinatorMock::processReplSetInitiate(OperationContext* txn,
+Status ReplicationCoordinatorMock::processReplSetInitiate(OperationContext* opCtx,
const BSONObj& configObj,
BSONObjBuilder* resultObj) {
return Status::OK();
@@ -355,7 +355,7 @@ Status ReplicationCoordinatorMock::processReplSetUpdatePosition(const UpdatePosi
return Status::OK();
}
-Status ReplicationCoordinatorMock::processHandshake(OperationContext* txn,
+Status ReplicationCoordinatorMock::processHandshake(OperationContext* opCtx,
const HandshakeArgs& handshake) {
return Status::OK();
}
@@ -394,7 +394,7 @@ HostAndPort ReplicationCoordinatorMock::chooseNewSyncSource(const OpTime& lastOp
void ReplicationCoordinatorMock::blacklistSyncSource(const HostAndPort& host, Date_t until) {}
-void ReplicationCoordinatorMock::resetLastOpTimesFromOplog(OperationContext* txn) {
+void ReplicationCoordinatorMock::resetLastOpTimesFromOplog(OperationContext* opCtx) {
invariant(false);
}
@@ -410,7 +410,7 @@ OpTime ReplicationCoordinatorMock::getLastCommittedOpTime() const {
}
Status ReplicationCoordinatorMock::processReplSetRequestVotes(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReplSetRequestVotesArgs& args,
ReplSetRequestVotesResponse* response) {
return Status::OK();
@@ -439,17 +439,17 @@ long long ReplicationCoordinatorMock::getTerm() {
return OpTime::kInitialTerm;
}
-Status ReplicationCoordinatorMock::updateTerm(OperationContext* txn, long long term) {
+Status ReplicationCoordinatorMock::updateTerm(OperationContext* opCtx, long long term) {
return Status::OK();
}
-SnapshotName ReplicationCoordinatorMock::reserveSnapshotName(OperationContext* txn) {
+SnapshotName ReplicationCoordinatorMock::reserveSnapshotName(OperationContext* opCtx) {
return SnapshotName(_snapshotNameGenerator.addAndFetch(1));
}
void ReplicationCoordinatorMock::forceSnapshotCreation() {}
-void ReplicationCoordinatorMock::createSnapshot(OperationContext* txn,
+void ReplicationCoordinatorMock::createSnapshot(OperationContext* opCtx,
OpTime timeOfSnapshot,
SnapshotName name){};
@@ -459,7 +459,7 @@ OpTime ReplicationCoordinatorMock::getCurrentCommittedSnapshotOpTime() const {
return OpTime();
}
-void ReplicationCoordinatorMock::waitUntilSnapshotCommitted(OperationContext* txn,
+void ReplicationCoordinatorMock::waitUntilSnapshotCommitted(OperationContext* opCtx,
const SnapshotName& untilSnapshot) {}
size_t ReplicationCoordinatorMock::getNumUncommittedSnapshots() {
diff --git a/src/mongo/db/repl/replication_coordinator_mock.h b/src/mongo/db/repl/replication_coordinator_mock.h
index 4b3fd99d3ce..0e9109b1665 100644
--- a/src/mongo/db/repl/replication_coordinator_mock.h
+++ b/src/mongo/db/repl/replication_coordinator_mock.h
@@ -53,9 +53,9 @@ public:
ReplicationCoordinatorMock(ServiceContext* service, const ReplSettings& settings);
virtual ~ReplicationCoordinatorMock();
- virtual void startup(OperationContext* txn);
+ virtual void startup(OperationContext* opCtx);
- virtual void shutdown(OperationContext* txn);
+ virtual void shutdown(OperationContext* opCtx);
virtual ReplicationExecutor* getExecutor() override {
return nullptr;
@@ -78,36 +78,36 @@ public:
virtual void clearSyncSourceBlacklist();
virtual ReplicationCoordinator::StatusAndDuration awaitReplication(
- OperationContext* txn, const OpTime& opTime, const WriteConcernOptions& writeConcern);
+ OperationContext* opCtx, const OpTime& opTime, const WriteConcernOptions& writeConcern);
virtual ReplicationCoordinator::StatusAndDuration awaitReplicationOfLastOpForClient(
- OperationContext* txn, const WriteConcernOptions& writeConcern);
+ OperationContext* opCtx, const WriteConcernOptions& writeConcern);
- virtual Status stepDown(OperationContext* txn,
+ virtual Status stepDown(OperationContext* opCtx,
bool force,
const Milliseconds& waitTime,
const Milliseconds& stepdownTime);
virtual bool isMasterForReportingPurposes();
- virtual bool canAcceptWritesForDatabase(OperationContext* txn, StringData dbName);
+ virtual bool canAcceptWritesForDatabase(OperationContext* opCtx, StringData dbName);
- virtual bool canAcceptWritesForDatabase_UNSAFE(OperationContext* txn, StringData dbName);
+ virtual bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, StringData dbName);
- bool canAcceptWritesFor(OperationContext* txn, const NamespaceString& ns) override;
+ bool canAcceptWritesFor(OperationContext* opCtx, const NamespaceString& ns) override;
- bool canAcceptWritesFor_UNSAFE(OperationContext* txn, const NamespaceString& ns) override;
+ bool canAcceptWritesFor_UNSAFE(OperationContext* opCtx, const NamespaceString& ns) override;
virtual Status checkIfWriteConcernCanBeSatisfied(const WriteConcernOptions& writeConcern) const;
- virtual Status checkCanServeReadsFor(OperationContext* txn,
+ virtual Status checkCanServeReadsFor(OperationContext* opCtx,
const NamespaceString& ns,
bool slaveOk);
- virtual Status checkCanServeReadsFor_UNSAFE(OperationContext* txn,
+ virtual Status checkCanServeReadsFor_UNSAFE(OperationContext* opCtx,
const NamespaceString& ns,
bool slaveOk);
- virtual bool shouldRelaxIndexConstraints(OperationContext* txn, const NamespaceString& ns);
+ virtual bool shouldRelaxIndexConstraints(OperationContext* opCtx, const NamespaceString& ns);
virtual Status setLastOptimeForSlave(const OID& rid, const Timestamp& ts);
@@ -124,7 +124,7 @@ public:
virtual OpTime getMyLastAppliedOpTime() const;
virtual OpTime getMyLastDurableOpTime() const;
- virtual Status waitUntilOpTimeForRead(OperationContext* txn,
+ virtual Status waitUntilOpTimeForRead(OperationContext* opCtx,
const ReadConcernArgs& settings) override;
virtual OID getElectionId();
@@ -143,7 +143,7 @@ public:
virtual void signalUpstreamUpdater();
- virtual Status resyncData(OperationContext* txn, bool waitUntilCompleted) override;
+ virtual Status resyncData(OperationContext* opCtx, bool waitUntilCompleted) override;
virtual StatusWith<BSONObj> prepareReplSetUpdatePositionCommand(
ReplSetUpdatePositionCommandStyle commandStyle) const override;
@@ -170,7 +170,7 @@ public:
virtual bool getMaintenanceMode();
- virtual Status processReplSetSyncFrom(OperationContext* txn,
+ virtual Status processReplSetSyncFrom(OperationContext* opCtx,
const HostAndPort& target,
BSONObjBuilder* resultObj);
@@ -179,11 +179,11 @@ public:
virtual Status processHeartbeat(const ReplSetHeartbeatArgs& args,
ReplSetHeartbeatResponse* response);
- virtual Status processReplSetReconfig(OperationContext* txn,
+ virtual Status processReplSetReconfig(OperationContext* opCtx,
const ReplSetReconfigArgs& args,
BSONObjBuilder* resultObj);
- virtual Status processReplSetInitiate(OperationContext* txn,
+ virtual Status processReplSetInitiate(OperationContext* opCtx,
const BSONObj& configObj,
BSONObjBuilder* resultObj);
@@ -200,7 +200,7 @@ public:
virtual Status processReplSetUpdatePosition(const UpdatePositionArgs& updates,
long long* configVersion);
- virtual Status processHandshake(OperationContext* txn, const HandshakeArgs& handshake);
+ virtual Status processHandshake(OperationContext* opCtx, const HandshakeArgs& handshake);
virtual bool buildsIndexes();
@@ -216,7 +216,7 @@ public:
virtual void blacklistSyncSource(const HostAndPort& host, Date_t until);
- virtual void resetLastOpTimesFromOplog(OperationContext* txn);
+ virtual void resetLastOpTimesFromOplog(OperationContext* opCtx);
virtual bool shouldChangeSyncSource(const HostAndPort& currentSource,
const rpc::ReplSetMetadata& replMetadata,
@@ -224,7 +224,7 @@ public:
virtual OpTime getLastCommittedOpTime() const;
- virtual Status processReplSetRequestVotes(OperationContext* txn,
+ virtual Status processReplSetRequestVotes(OperationContext* opCtx,
const ReplSetRequestVotesArgs& args,
ReplSetRequestVotesResponse* response);
@@ -243,13 +243,13 @@ public:
virtual long long getTerm();
- virtual Status updateTerm(OperationContext* txn, long long term);
+ virtual Status updateTerm(OperationContext* opCtx, long long term);
- virtual SnapshotName reserveSnapshotName(OperationContext* txn);
+ virtual SnapshotName reserveSnapshotName(OperationContext* opCtx);
virtual void forceSnapshotCreation() override;
- virtual void createSnapshot(OperationContext* txn,
+ virtual void createSnapshot(OperationContext* opCtx,
OpTime timeOfSnapshot,
SnapshotName name) override;
@@ -257,7 +257,7 @@ public:
virtual OpTime getCurrentCommittedSnapshotOpTime() const override;
- virtual void waitUntilSnapshotCommitted(OperationContext* txn,
+ virtual void waitUntilSnapshotCommitted(OperationContext* opCtx,
const SnapshotName& untilSnapshot) override;
virtual size_t getNumUncommittedSnapshots() override;
diff --git a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
index 43dff9b4c06..945455d4512 100644
--- a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
+++ b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
@@ -89,8 +89,8 @@ void ReplCoordTest::tearDown() {
_externalState->setStoreLocalConfigDocumentToHang(false);
}
if (_callShutdown) {
- auto txn = makeOperationContext();
- shutdown(txn.get());
+ auto opCtx = makeOperationContext();
+ shutdown(opCtx.get());
}
}
@@ -165,8 +165,8 @@ void ReplCoordTest::start() {
init();
}
- const auto txn = makeOperationContext();
- _repl->startup(txn.get());
+ const auto opCtx = makeOperationContext();
+ _repl->startup(opCtx.get());
_repl->waitForStartUpComplete_forTest();
_callShutdown = true;
}
@@ -362,8 +362,8 @@ void ReplCoordTest::simulateSuccessfulV1ElectionAt(Date_t electionTime) {
ASSERT_FALSE(imResponse.isMaster()) << imResponse.toBSON().toString();
ASSERT_TRUE(imResponse.isSecondary()) << imResponse.toBSON().toString();
{
- auto txn = makeOperationContext();
- replCoord->signalDrainComplete(txn.get(), replCoord->getTerm());
+ auto opCtx = makeOperationContext();
+ replCoord->signalDrainComplete(opCtx.get(), replCoord->getTerm());
}
ASSERT(replCoord->getApplierState() == ReplicationCoordinator::ApplierState::Stopped);
replCoord->fillIsMasterForReplSet(&imResponse);
@@ -425,8 +425,8 @@ void ReplCoordTest::simulateSuccessfulElection() {
ASSERT_FALSE(imResponse.isMaster()) << imResponse.toBSON().toString();
ASSERT_TRUE(imResponse.isSecondary()) << imResponse.toBSON().toString();
{
- auto txn = makeOperationContext();
- replCoord->signalDrainComplete(txn.get(), replCoord->getTerm());
+ auto opCtx = makeOperationContext();
+ replCoord->signalDrainComplete(opCtx.get(), replCoord->getTerm());
}
replCoord->fillIsMasterForReplSet(&imResponse);
ASSERT_TRUE(imResponse.isMaster()) << imResponse.toBSON().toString();
@@ -435,10 +435,10 @@ void ReplCoordTest::simulateSuccessfulElection() {
ASSERT(replCoord->getMemberState().primary()) << replCoord->getMemberState().toString();
}
-void ReplCoordTest::shutdown(OperationContext* txn) {
+void ReplCoordTest::shutdown(OperationContext* opCtx) {
invariant(_callShutdown);
_net->exitNetwork();
- _repl->shutdown(txn);
+ _repl->shutdown(opCtx);
_callShutdown = false;
}
diff --git a/src/mongo/db/repl/replication_coordinator_test_fixture.h b/src/mongo/db/repl/replication_coordinator_test_fixture.h
index b9b05e46172..5ffab24a8ef 100644
--- a/src/mongo/db/repl/replication_coordinator_test_fixture.h
+++ b/src/mongo/db/repl/replication_coordinator_test_fixture.h
@@ -233,7 +233,7 @@ protected:
/**
* Shuts down the objects under test.
*/
- void shutdown(OperationContext* txn);
+ void shutdown(OperationContext* opCtx);
/**
* Receive the heartbeat request from replication coordinator and reply with a response.
diff --git a/src/mongo/db/repl/replication_executor.cpp b/src/mongo/db/repl/replication_executor.cpp
index f4070ac5d9e..3f9f0c5836b 100644
--- a/src/mongo/db/repl/replication_executor.cpp
+++ b/src/mongo/db/repl/replication_executor.cpp
@@ -404,8 +404,8 @@ StatusWith<ReplicationExecutor::CallbackHandle> ReplicationExecutor::scheduleDBW
handle.getValue(),
&_dbWorkInProgressQueue,
nullptr);
- auto task = [doOp](OperationContext* txn, const Status& status) {
- makeNoExcept(stdx::bind(doOp, txn, status))();
+ auto task = [doOp](OperationContext* opCtx, const Status& status) {
+ makeNoExcept(stdx::bind(doOp, opCtx, status))();
return TaskRunner::NextAction::kDisposeOperationContext;
};
if (mode == MODE_NONE && nss.ns().empty()) {
@@ -418,7 +418,7 @@ StatusWith<ReplicationExecutor::CallbackHandle> ReplicationExecutor::scheduleDBW
return handle;
}
-void ReplicationExecutor::_doOperation(OperationContext* txn,
+void ReplicationExecutor::_doOperation(OperationContext* opCtx,
const Status& taskRunnerStatus,
const CallbackHandle& cbHandle,
WorkQueue* workQueue,
@@ -442,7 +442,7 @@ void ReplicationExecutor::_doOperation(OperationContext* txn,
(callback->_isCanceled || !taskRunnerStatus.isOK()
? Status(ErrorCodes::CallbackCanceled, "Callback canceled")
: Status::OK()),
- txn));
+ opCtx));
}
lk.lock();
signalEvent_inlock(callback->_finishedEvent);
@@ -461,8 +461,8 @@ ReplicationExecutor::scheduleWorkWithGlobalExclusiveLock(const CallbackFn& work)
&_exclusiveLockInProgressQueue,
&_terribleExLockSyncMutex);
_dblockExclusiveLockTaskRunner.schedule(DatabaseTask::makeGlobalExclusiveLockTask(
- [doOp](OperationContext* txn, const Status& status) {
- makeNoExcept(stdx::bind(doOp, txn, status))();
+ [doOp](OperationContext* opCtx, const Status& status) {
+ makeNoExcept(stdx::bind(doOp, opCtx, status))();
return TaskRunner::NextAction::kDisposeOperationContext;
}));
}
diff --git a/src/mongo/db/repl/replication_executor.h b/src/mongo/db/repl/replication_executor.h
index 26f8e522317..c5e20a8ac68 100644
--- a/src/mongo/db/repl/replication_executor.h
+++ b/src/mongo/db/repl/replication_executor.h
@@ -280,7 +280,7 @@ private:
* Executes the callback referenced by "cbHandle", and moves the underlying
* WorkQueue::iterator from "workQueue" into the _freeQueue.
*
- * "txn" is a pointer to the OperationContext.
+ * "opCtx" is a pointer to the OperationContext.
*
* "status" is the callback status from the task runner. Only possible values are
* Status::OK and ErrorCodes::CallbackCanceled (when task runner is canceled).
@@ -288,7 +288,7 @@ private:
* If "terribleExLockSyncMutex" is not null, serializes execution of "cbHandle" with the
* execution of other callbacks.
*/
- void _doOperation(OperationContext* txn,
+ void _doOperation(OperationContext* opCtx,
const Status& taskRunnerStatus,
const CallbackHandle& cbHandle,
WorkQueue* workQueue,
diff --git a/src/mongo/db/repl/replication_executor_test.cpp b/src/mongo/db/repl/replication_executor_test.cpp
index e630a4a2e30..831259951b4 100644
--- a/src/mongo/db/repl/replication_executor_test.cpp
+++ b/src/mongo/db/repl/replication_executor_test.cpp
@@ -72,12 +72,12 @@ TEST_F(ReplicationExecutorTest, ScheduleDBWorkAndExclusiveWorkConcurrently) {
NamespaceString nss("mydb", "mycoll");
ReplicationExecutor& executor = getReplExecutor();
Status status1 = getDetectableErrorStatus();
- OperationContext* txn = nullptr;
+ OperationContext* opCtx = nullptr;
using CallbackData = ReplicationExecutor::CallbackArgs;
ASSERT_OK(executor
.scheduleDBWork([&](const CallbackData& cbData) {
status1 = cbData.status;
- txn = cbData.txn;
+ opCtx = cbData.opCtx;
barrier.countDownAndWait();
if (cbData.status != ErrorCodes::CallbackCanceled)
cbData.executor->shutdown();
@@ -90,23 +90,23 @@ TEST_F(ReplicationExecutorTest, ScheduleDBWorkAndExclusiveWorkConcurrently) {
executor.startup();
executor.join();
ASSERT_OK(status1);
- ASSERT(txn);
+ ASSERT(opCtx);
}
TEST_F(ReplicationExecutorTest, ScheduleDBWorkWithCollectionLock) {
NamespaceString nss("mydb", "mycoll");
ReplicationExecutor& executor = getReplExecutor();
Status status1 = getDetectableErrorStatus();
- OperationContext* txn = nullptr;
+ OperationContext* opCtx = nullptr;
bool collectionIsLocked = false;
using CallbackData = ReplicationExecutor::CallbackArgs;
ASSERT_OK(executor
.scheduleDBWork(
[&](const CallbackData& cbData) {
status1 = cbData.status;
- txn = cbData.txn;
- collectionIsLocked = txn
- ? txn->lockState()->isCollectionLockedForMode(nss.ns(), MODE_X)
+ opCtx = cbData.opCtx;
+ collectionIsLocked = opCtx
+ ? opCtx->lockState()->isCollectionLockedForMode(nss.ns(), MODE_X)
: false;
if (cbData.status != ErrorCodes::CallbackCanceled)
cbData.executor->shutdown();
@@ -117,21 +117,21 @@ TEST_F(ReplicationExecutorTest, ScheduleDBWorkWithCollectionLock) {
executor.startup();
executor.join();
ASSERT_OK(status1);
- ASSERT(txn);
+ ASSERT(opCtx);
ASSERT_TRUE(collectionIsLocked);
}
TEST_F(ReplicationExecutorTest, ScheduleExclusiveLockOperation) {
ReplicationExecutor& executor = getReplExecutor();
Status status1 = getDetectableErrorStatus();
- OperationContext* txn = nullptr;
+ OperationContext* opCtx = nullptr;
bool lockIsW = false;
using CallbackData = ReplicationExecutor::CallbackArgs;
ASSERT_OK(executor
.scheduleWorkWithGlobalExclusiveLock([&](const CallbackData& cbData) {
status1 = cbData.status;
- txn = cbData.txn;
- lockIsW = txn ? txn->lockState()->isW() : false;
+ opCtx = cbData.opCtx;
+ lockIsW = opCtx ? opCtx->lockState()->isW() : false;
if (cbData.status != ErrorCodes::CallbackCanceled)
cbData.executor->shutdown();
})
@@ -139,7 +139,7 @@ TEST_F(ReplicationExecutorTest, ScheduleExclusiveLockOperation) {
executor.startup();
executor.join();
ASSERT_OK(status1);
- ASSERT(txn);
+ ASSERT(opCtx);
ASSERT_TRUE(lockIsW);
}
diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp
index c75c7b38880..e0a49f5849f 100644
--- a/src/mongo/db/repl/replication_info.cpp
+++ b/src/mongo/db/repl/replication_info.cpp
@@ -66,7 +66,7 @@ using std::stringstream;
namespace repl {
-void appendReplicationInfo(OperationContext* txn, BSONObjBuilder& result, int level) {
+void appendReplicationInfo(OperationContext* opCtx, BSONObjBuilder& result, int level) {
ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
if (replCoord->getSettings().usingReplSets()) {
IsMasterResponse isMasterResponse;
@@ -95,9 +95,9 @@ void appendReplicationInfo(OperationContext* txn, BSONObjBuilder& result, int le
list<BSONObj> src;
{
const NamespaceString localSources{"local.sources"};
- AutoGetCollectionForRead ctx(txn, localSources);
+ AutoGetCollectionForRead ctx(opCtx, localSources);
unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(
- txn, localSources.ns(), ctx.getCollection(), PlanExecutor::YIELD_MANUAL));
+ opCtx, localSources.ns(), ctx.getCollection(), PlanExecutor::YIELD_MANUAL));
BSONObj obj;
PlanExecutor::ExecState state;
while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
@@ -124,7 +124,7 @@ void appendReplicationInfo(OperationContext* txn, BSONObjBuilder& result, int le
}
if (level > 1) {
- wassert(!txn->lockState()->isLocked());
+ wassert(!opCtx->lockState()->isLocked());
// note: there is no so-style timeout on this connection; perhaps we should have
// one.
ScopedDbConnection conn(s["host"].valuestr());
@@ -159,7 +159,7 @@ public:
return true;
}
- BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObj generateSection(OperationContext* opCtx, const BSONElement& configElement) const {
if (!getGlobalReplicationCoordinator()->isReplEnabled()) {
return BSONObj();
}
@@ -167,7 +167,7 @@ public:
int level = configElement.numberInt();
BSONObjBuilder result;
- appendReplicationInfo(txn, result, level);
+ appendReplicationInfo(opCtx, result, level);
getGlobalReplicationCoordinator()->processReplSetGetRBID(&result);
return result.obj();
@@ -182,7 +182,7 @@ public:
return false;
}
- BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObj generateSection(OperationContext* opCtx, const BSONElement& configElement) const {
ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
if (!replCoord->isReplEnabled()) {
return BSONObj();
@@ -199,7 +199,7 @@ public:
BSONObj o;
uassert(17347,
"Problem reading earliest entry from oplog",
- Helpers::getSingleton(txn, oplogNS.c_str(), o));
+ Helpers::getSingleton(opCtx, oplogNS.c_str(), o));
result.append("earliestOptime", o["ts"].timestamp());
return result.obj();
}
@@ -225,7 +225,7 @@ public:
const BSONObj& cmdObj,
std::vector<Privilege>* out) {} // No auth required
CmdIsMaster() : Command("isMaster", true, "ismaster") {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
@@ -235,20 +235,20 @@ public:
authenticated.
*/
if (cmdObj["forShell"].trueValue()) {
- LastError::get(txn->getClient()).disable();
+ LastError::get(opCtx->getClient()).disable();
}
// Tag connections to avoid closing them on stepdown.
auto hangUpElement = cmdObj["hangUpOnStepDown"];
if (!hangUpElement.eoo() && !hangUpElement.trueValue()) {
- auto session = txn->getClient()->session();
+ auto session = opCtx->getClient()->session();
if (session) {
session->replaceTags(session->getTags() |
executor::NetworkInterface::kMessagingPortKeepOpen);
}
}
- auto& clientMetadataIsMasterState = ClientMetadataIsMasterState::get(txn->getClient());
+ auto& clientMetadataIsMasterState = ClientMetadataIsMasterState::get(opCtx->getClient());
bool seenIsMaster = clientMetadataIsMasterState.hasSeenIsMaster();
if (!seenIsMaster) {
clientMetadataIsMasterState.setSeenIsMaster();
@@ -271,13 +271,13 @@ public:
invariant(swParseClientMetadata.getValue());
- swParseClientMetadata.getValue().get().logClientMetadata(txn->getClient());
+ swParseClientMetadata.getValue().get().logClientMetadata(opCtx->getClient());
clientMetadataIsMasterState.setClientMetadata(
- txn->getClient(), std::move(swParseClientMetadata.getValue()));
+ opCtx->getClient(), std::move(swParseClientMetadata.getValue()));
}
- appendReplicationInfo(txn, result, 0);
+ appendReplicationInfo(opCtx, result, 0);
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
// If we have feature compatibility version 3.4, use a config server mode that 3.2
@@ -302,10 +302,10 @@ public:
"automationServiceDescriptor",
static_cast<ServerParameter*>(nullptr));
if (parameter)
- parameter->append(txn, result, "automationServiceDescriptor");
+ parameter->append(opCtx, result, "automationServiceDescriptor");
- if (txn->getClient()->session()) {
- MessageCompressorManager::forSession(txn->getClient()->session())
+ if (opCtx->getClient()->session()) {
+ MessageCompressorManager::forSession(opCtx->getClient()->session())
.serverNegotiate(cmdObj, &result);
}
diff --git a/src/mongo/db/repl/resync.cpp b/src/mongo/db/repl/resync.cpp
index e45022440a3..848700d215a 100644
--- a/src/mongo/db/repl/resync.cpp
+++ b/src/mongo/db/repl/resync.cpp
@@ -72,7 +72,7 @@ public:
}
CmdResync() : Command(kResyncFieldName) {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -100,16 +100,16 @@ public:
return appendCommandStatus(
result, Status(ErrorCodes::NotSecondary, "primaries cannot resync"));
}
- uassertStatusOKWithLocation(replCoord->resyncData(txn, waitForResync), "resync", 0);
+ uassertStatusOKWithLocation(replCoord->resyncData(opCtx, waitForResync), "resync", 0);
return true;
}
// Master/Slave resync.
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite globalWriteLock(txn->lockState());
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite globalWriteLock(opCtx->lockState());
// below this comment pertains only to master/slave replication
if (cmdObj.getBoolField("force")) {
- if (!waitForSyncToFinish(txn, errmsg))
+ if (!waitForSyncToFinish(opCtx, errmsg))
return false;
replAllDead = "resync forced";
}
@@ -118,16 +118,16 @@ public:
errmsg = "not dead, no need to resync";
return false;
}
- if (!waitForSyncToFinish(txn, errmsg))
+ if (!waitForSyncToFinish(opCtx, errmsg))
return false;
- ReplSource::forceResyncDead(txn, "client");
+ ReplSource::forceResyncDead(opCtx, "client");
result.append("info", "triggered resync for all sources");
return true;
}
- bool waitForSyncToFinish(OperationContext* txn, string& errmsg) const {
+ bool waitForSyncToFinish(OperationContext* opCtx, string& errmsg) const {
// Wait for slave thread to finish syncing, so sources will be be
// reloaded with new saved state on next pass.
Timer t;
@@ -135,7 +135,7 @@ public:
if (syncing.load() == 0 || t.millis() > 30000)
break;
{
- Lock::TempRelease t(txn->lockState());
+ Lock::TempRelease t(opCtx->lockState());
relinquishSyncingSome.store(1);
sleepmillis(1);
}
diff --git a/src/mongo/db/repl/rollback_source.h b/src/mongo/db/repl/rollback_source.h
index 3e8d6f55578..4e068d336ce 100644
--- a/src/mongo/db/repl/rollback_source.h
+++ b/src/mongo/db/repl/rollback_source.h
@@ -76,7 +76,7 @@ public:
/**
* Clones a single collection from the sync source.
*/
- virtual void copyCollectionFromRemote(OperationContext* txn,
+ virtual void copyCollectionFromRemote(OperationContext* opCtx,
const NamespaceString& nss) const = 0;
/**
diff --git a/src/mongo/db/repl/rollback_source_impl.cpp b/src/mongo/db/repl/rollback_source_impl.cpp
index 27513953dca..226edcc0a63 100644
--- a/src/mongo/db/repl/rollback_source_impl.cpp
+++ b/src/mongo/db/repl/rollback_source_impl.cpp
@@ -68,7 +68,7 @@ BSONObj RollbackSourceImpl::findOne(const NamespaceString& nss, const BSONObj& f
return _getConnection()->findOne(nss.toString(), filter, NULL, QueryOption_SlaveOk).getOwned();
}
-void RollbackSourceImpl::copyCollectionFromRemote(OperationContext* txn,
+void RollbackSourceImpl::copyCollectionFromRemote(OperationContext* opCtx,
const NamespaceString& nss) const {
std::string errmsg;
std::unique_ptr<DBClientConnection> tmpConn(new DBClientConnection());
@@ -82,7 +82,7 @@ void RollbackSourceImpl::copyCollectionFromRemote(OperationContext* txn,
uassert(15909,
str::stream() << "replSet rollback error resyncing collection " << nss.ns() << ' '
<< errmsg,
- cloner.copyCollection(txn, nss.ns(), BSONObj(), errmsg, true));
+ cloner.copyCollection(opCtx, nss.ns(), BSONObj(), errmsg, true));
}
StatusWith<BSONObj> RollbackSourceImpl::getCollectionInfo(const NamespaceString& nss) const {
diff --git a/src/mongo/db/repl/rollback_source_impl.h b/src/mongo/db/repl/rollback_source_impl.h
index fe4b7a8aad1..55f9237949b 100644
--- a/src/mongo/db/repl/rollback_source_impl.h
+++ b/src/mongo/db/repl/rollback_source_impl.h
@@ -64,7 +64,8 @@ public:
BSONObj findOne(const NamespaceString& nss, const BSONObj& filter) const override;
- void copyCollectionFromRemote(OperationContext* txn, const NamespaceString& nss) const override;
+ void copyCollectionFromRemote(OperationContext* opCtx,
+ const NamespaceString& nss) const override;
StatusWith<BSONObj> getCollectionInfo(const NamespaceString& nss) const override;
diff --git a/src/mongo/db/repl/rs_initialsync.cpp b/src/mongo/db/repl/rs_initialsync.cpp
index 073afcc73ec..a6a5cf78baf 100644
--- a/src/mongo/db/repl/rs_initialsync.cpp
+++ b/src/mongo/db/repl/rs_initialsync.cpp
@@ -84,16 +84,16 @@ MONGO_EXPORT_SERVER_PARAMETER(num3Dot2InitialSyncAttempts, int, 10);
* Also resets the bgsync thread so that it reconnects its sync source after the oplog has been
* truncated.
*/
-void truncateAndResetOplog(OperationContext* txn,
+void truncateAndResetOplog(OperationContext* opCtx,
ReplicationCoordinator* replCoord,
BackgroundSync* bgsync) {
// Add field to minvalid document to tell us to restart initial sync if we crash
- StorageInterface::get(txn)->setInitialSyncFlag(txn);
+ StorageInterface::get(opCtx)->setInitialSyncFlag(opCtx);
- AutoGetDb autoDb(txn, "local", MODE_X);
+ AutoGetDb autoDb(opCtx, "local", MODE_X);
massert(28585, "no local database found", autoDb.getDb());
- invariant(txn->lockState()->isCollectionLockedForMode(rsOplogName, MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(rsOplogName, MODE_X));
// Note: the following order is important.
// The bgsync thread uses an empty optime as a sentinel to know to wait
// for initial sync; thus, we must
@@ -104,7 +104,7 @@ void truncateAndResetOplog(OperationContext* txn,
replCoord->resetMyLastOpTimes();
bgsync->stop(true);
bgsync->startProducerIfStopped();
- bgsync->clearBuffer(txn);
+ bgsync->clearBuffer(opCtx);
replCoord->clearSyncSourceBlacklist();
@@ -112,15 +112,15 @@ void truncateAndResetOplog(OperationContext* txn,
Collection* collection = autoDb.getDb()->getCollection(rsOplogName);
fassert(28565, collection);
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
- Status status = collection->truncate(txn);
+ WriteUnitOfWork wunit(opCtx);
+ Status status = collection->truncate(opCtx);
fassert(28564, status);
wunit.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "truncate", collection->ns().ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "truncate", collection->ns().ns());
}
-bool _initialSyncClone(OperationContext* txn,
+bool _initialSyncClone(OperationContext* opCtx,
Cloner& cloner,
const std::string& host,
const std::string& db,
@@ -144,10 +144,10 @@ bool _initialSyncClone(OperationContext* txn,
options.createCollections = false;
// Make database stable
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbWrite(txn->lockState(), db, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dbWrite(opCtx->lockState(), db, MODE_X);
- Status status = cloner.copyDb(txn, db, host, options, nullptr, collections);
+ Status status = cloner.copyDb(opCtx, db, host, options, nullptr, collections);
if (!status.isOK()) {
log() << "initial sync: error while " << (dataPass ? "cloning " : "indexing ") << db
<< ". " << redact(status);
@@ -155,7 +155,7 @@ bool _initialSyncClone(OperationContext* txn,
}
if (dataPass && (db == "admin")) {
- fassertNoTrace(28619, checkAdminDatabase(txn, dbHolder().get(txn, db)));
+ fassertNoTrace(28619, checkAdminDatabase(opCtx, dbHolder().get(opCtx, db)));
}
return true;
}
@@ -167,7 +167,7 @@ bool _initialSyncClone(OperationContext* txn,
* @param r the oplog reader.
* @return if applying the oplog succeeded.
*/
-bool _initialSyncApplyOplog(OperationContext* txn,
+bool _initialSyncApplyOplog(OperationContext* opCtx,
repl::InitialSync* syncer,
OplogReader* r,
BackgroundSync* bgsync) {
@@ -178,7 +178,7 @@ bool _initialSyncApplyOplog(OperationContext* txn,
if (MONGO_FAIL_POINT(failInitSyncWithBufferedEntriesLeft)) {
log() << "adding fake oplog entry to buffer.";
bgsync->pushTestOpToBuffer(
- txn,
+ opCtx,
BSON("ts" << startOpTime.getTimestamp() << "t" << startOpTime.getTerm() << "v" << 1
<< "op"
<< "n"));
@@ -222,7 +222,7 @@ bool _initialSyncApplyOplog(OperationContext* txn,
// apply till stopOpTime
try {
LOG(2) << "Applying oplog entries from " << startOpTime << " until " << stopOpTime;
- syncer->oplogApplication(txn, stopOpTime);
+ syncer->oplogApplication(opCtx, stopOpTime);
if (globalInShutdownDeprecated()) {
return false;
@@ -262,15 +262,15 @@ bool _initialSyncApplyOplog(OperationContext* txn,
* ErrorCode::InitialSyncOplogSourceMissing if the node fails to find an sync source, Status::OK
* if everything worked, and ErrorCode::InitialSyncFailure for all other error cases.
*/
-Status _initialSync(OperationContext* txn, BackgroundSync* bgsync) {
+Status _initialSync(OperationContext* opCtx, BackgroundSync* bgsync) {
log() << "initial sync pending";
- txn->setReplicatedWrites(false);
- DisableDocumentValidation validationDisabler(txn);
+ opCtx->setReplicatedWrites(false);
+ DisableDocumentValidation validationDisabler(opCtx);
ReplicationCoordinator* replCoord(getGlobalReplicationCoordinator());
// reset state for initial sync
- truncateAndResetOplog(txn, replCoord, bgsync);
+ truncateAndResetOplog(opCtx, replCoord, bgsync);
OplogReader r;
@@ -278,7 +278,7 @@ Status _initialSync(OperationContext* txn, BackgroundSync* bgsync) {
while (r.getHost().empty()) {
// We must prime the sync source selector so that it considers all candidates regardless
// of oplog position, by passing in null OpTime as the last op fetched time.
- r.connectToSyncSource(txn, OpTime(), OpTime(), replCoord);
+ r.connectToSyncSource(opCtx, OpTime(), OpTime(), replCoord);
if (r.getHost().empty()) {
std::string msg =
@@ -306,7 +306,7 @@ Status _initialSync(OperationContext* txn, BackgroundSync* bgsync) {
}
log() << "initial sync drop all databases";
- dropAllDatabasesExceptLocal(txn);
+ dropAllDatabasesExceptLocal(opCtx);
if (MONGO_FAIL_POINT(initialSyncHangBeforeCopyingDatabases)) {
log() << "initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled. "
@@ -360,17 +360,17 @@ Status _initialSync(OperationContext* txn, BackgroundSync* bgsync) {
createCollectionParams.push_back(params);
}
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbWrite(txn->lockState(), db, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dbWrite(opCtx->lockState(), db, MODE_X);
- auto createStatus = cloner.createCollectionsForDb(txn, createCollectionParams, db);
+ auto createStatus = cloner.createCollectionsForDb(opCtx, createCollectionParams, db);
if (!createStatus.isOK()) {
return createStatus;
}
collectionsPerDb.emplace(db, std::move(collections));
}
for (auto&& dbCollsPair : collectionsPerDb) {
- if (!_initialSyncClone(txn,
+ if (!_initialSyncClone(opCtx,
cloner,
r.conn()->getServerAddress(),
dbCollsPair.first,
@@ -385,15 +385,15 @@ Status _initialSync(OperationContext* txn, BackgroundSync* bgsync) {
// prime oplog, but don't need to actually apply the op as the cloned data already reflects it.
fassertStatusOK(
40142,
- StorageInterface::get(txn)->insertDocument(txn, NamespaceString(rsOplogName), lastOp));
+ StorageInterface::get(opCtx)->insertDocument(opCtx, NamespaceString(rsOplogName), lastOp));
OpTime lastOptime = OplogEntry(lastOp).getOpTime();
- ReplClientInfo::forClient(txn->getClient()).setLastOp(lastOptime);
+ ReplClientInfo::forClient(opCtx->getClient()).setLastOp(lastOptime);
replCoord->setMyLastAppliedOpTime(lastOptime);
setNewTimestamp(replCoord->getServiceContext(), lastOptime.getTimestamp());
std::string msg = "oplog sync 1 of 3";
log() << msg;
- if (!_initialSyncApplyOplog(txn, &init, &r, bgsync)) {
+ if (!_initialSyncApplyOplog(opCtx, &init, &r, bgsync)) {
return Status(ErrorCodes::InitialSyncFailure,
str::stream() << "initial sync failed: " << msg);
}
@@ -404,7 +404,7 @@ Status _initialSync(OperationContext* txn, BackgroundSync* bgsync) {
// TODO: replace with "tail" instance below, since we don't need to retry/reclone missing docs.
msg = "oplog sync 2 of 3";
log() << msg;
- if (!_initialSyncApplyOplog(txn, &init, &r, bgsync)) {
+ if (!_initialSyncApplyOplog(opCtx, &init, &r, bgsync)) {
return Status(ErrorCodes::InitialSyncFailure,
str::stream() << "initial sync failed: " << msg);
}
@@ -413,7 +413,7 @@ Status _initialSync(OperationContext* txn, BackgroundSync* bgsync) {
msg = "initial sync building indexes";
log() << msg;
for (auto&& dbCollsPair : collectionsPerDb) {
- if (!_initialSyncClone(txn,
+ if (!_initialSyncClone(opCtx,
cloner,
r.conn()->getServerAddress(),
dbCollsPair.first,
@@ -431,14 +431,14 @@ Status _initialSync(OperationContext* txn, BackgroundSync* bgsync) {
log() << msg;
InitialSync tail(bgsync, multiSyncApply); // Use the non-initial sync apply code
- if (!_initialSyncApplyOplog(txn, &tail, &r, bgsync)) {
+ if (!_initialSyncApplyOplog(opCtx, &tail, &r, bgsync)) {
return Status(ErrorCodes::InitialSyncFailure,
str::stream() << "initial sync failed: " << msg);
}
// ---------
- Status status = getGlobalAuthorizationManager()->initialize(txn);
+ Status status = getGlobalAuthorizationManager()->initialize(opCtx);
if (!status.isOK()) {
warning() << "Failed to reinitialize auth data after initial sync. " << status;
return status;
@@ -448,7 +448,7 @@ Status _initialSync(OperationContext* txn, BackgroundSync* bgsync) {
// Initial sync is now complete.
// Clear the initial sync flag -- cannot be done under a db lock, or recursive.
- StorageInterface::get(txn)->clearInitialSyncFlag(txn);
+ StorageInterface::get(opCtx)->clearInitialSyncFlag(opCtx);
// Clear maint. mode.
while (replCoord->getMaintenanceMode()) {
@@ -463,20 +463,20 @@ stdx::mutex _initialSyncMutex;
const auto kInitialSyncRetrySleepDuration = Seconds{5};
} // namespace
-Status checkAdminDatabase(OperationContext* txn, Database* adminDb) {
- // Assumes txn holds MODE_X or MODE_S lock on "admin" database.
+Status checkAdminDatabase(OperationContext* opCtx, Database* adminDb) {
+ // Assumes opCtx holds MODE_X or MODE_S lock on "admin" database.
if (!adminDb) {
return Status::OK();
}
Collection* const usersCollection =
adminDb->getCollection(AuthorizationManager::usersCollectionNamespace);
const bool hasUsers =
- usersCollection && !Helpers::findOne(txn, usersCollection, BSONObj(), false).isNull();
+ usersCollection && !Helpers::findOne(opCtx, usersCollection, BSONObj(), false).isNull();
Collection* const adminVersionCollection =
adminDb->getCollection(AuthorizationManager::versionCollectionNamespace);
BSONObj authSchemaVersionDocument;
if (!adminVersionCollection ||
- !Helpers::findOne(txn,
+ !Helpers::findOne(opCtx,
adminVersionCollection,
AuthorizationManager::versionDocumentQuery,
authSchemaVersionDocument)) {
@@ -518,7 +518,7 @@ Status checkAdminDatabase(OperationContext* txn, Database* adminDb) {
return Status::OK();
}
-void syncDoInitialSync(OperationContext* txn,
+void syncDoInitialSync(OperationContext* opCtx,
ReplicationCoordinatorExternalState* replicationCoordinatorExternalState) {
stdx::unique_lock<stdx::mutex> lk(_initialSyncMutex, stdx::defer_lock);
if (!lk.try_lock()) {
@@ -530,21 +530,21 @@ void syncDoInitialSync(OperationContext* txn,
log() << "Starting replication fetcher thread for initial sync";
bgsync = stdx::make_unique<BackgroundSync>(
replicationCoordinatorExternalState,
- replicationCoordinatorExternalState->makeInitialSyncOplogBuffer(txn));
- bgsync->startup(txn);
- createOplog(txn);
+ replicationCoordinatorExternalState->makeInitialSyncOplogBuffer(opCtx));
+ bgsync->startup(opCtx);
+ createOplog(opCtx);
}
- ON_BLOCK_EXIT([txn, &bgsync]() {
+ ON_BLOCK_EXIT([opCtx, &bgsync]() {
log() << "Stopping replication fetcher thread for initial sync";
- bgsync->shutdown(txn);
- bgsync->join(txn);
+ bgsync->shutdown(opCtx);
+ bgsync->join(opCtx);
});
int failedAttempts = 0;
while (failedAttempts < num3Dot2InitialSyncAttempts.load()) {
try {
// leave loop when successful
- Status status = _initialSync(txn, bgsync.get());
+ Status status = _initialSync(opCtx, bgsync.get());
if (status.isOK()) {
break;
} else {
diff --git a/src/mongo/db/repl/rs_initialsync.h b/src/mongo/db/repl/rs_initialsync.h
index a7206ac5c2e..d621eb17954 100644
--- a/src/mongo/db/repl/rs_initialsync.h
+++ b/src/mongo/db/repl/rs_initialsync.h
@@ -41,13 +41,13 @@ class ReplicationCoordinatorExternalState;
* Begins an initial sync of a node. This drops all data, chooses a sync source,
* and runs the cloner from that sync source. The node's state is not changed.
*/
-void syncDoInitialSync(OperationContext* txn,
+void syncDoInitialSync(OperationContext* opCtx,
ReplicationCoordinatorExternalState* replicationCoordinatorExternalState);
/**
* Checks that the "admin" database contains a supported version of the auth data schema.
*/
-Status checkAdminDatabase(OperationContext* txn, Database* adminDb);
+Status checkAdminDatabase(OperationContext* opCtx, Database* adminDb);
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index dc5894cafc8..dbfb3a51284 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -335,7 +335,7 @@ namespace {
* information from the upstream node. If any information is fetched from the upstream node after we
* have written locally, the function must be called again.
*/
-void checkRbidAndUpdateMinValid(OperationContext* txn,
+void checkRbidAndUpdateMinValid(OperationContext* opCtx,
const int rbid,
const RollbackSource& rollbackSource) {
// It is important that the steps are performed in order to avoid racing with upstream rollbacks
@@ -357,8 +357,8 @@ void checkRbidAndUpdateMinValid(OperationContext* txn,
// online until we get to that point in freshness.
OpTime minValid = fassertStatusOK(28774, OpTime::parseFromOplogEntry(newMinValidDoc));
log() << "Setting minvalid to " << minValid;
- StorageInterface::get(txn)->setAppliedThrough(txn, {}); // Use top of oplog.
- StorageInterface::get(txn)->setMinValid(txn, minValid);
+ StorageInterface::get(opCtx)->setAppliedThrough(opCtx, {}); // Use top of oplog.
+ StorageInterface::get(opCtx)->setMinValid(opCtx, minValid);
if (MONGO_FAIL_POINT(rollbackHangThenFailAfterWritingMinValid)) {
// This log output is used in js tests so please leave it.
@@ -373,7 +373,7 @@ void checkRbidAndUpdateMinValid(OperationContext* txn,
}
}
-void syncFixUp(OperationContext* txn,
+void syncFixUp(OperationContext* opCtx,
const FixUpInfo& fixUpInfo,
const RollbackSource& rollbackSource,
ReplicationCoordinator* replCoord) {
@@ -415,7 +415,7 @@ void syncFixUp(OperationContext* txn,
}
log() << "rollback 3.5";
- checkRbidAndUpdateMinValid(txn, fixUpInfo.rbid, rollbackSource);
+ checkRbidAndUpdateMinValid(opCtx, fixUpInfo.rbid, rollbackSource);
// update them
log() << "rollback 4 n:" << goodVersions.size();
@@ -435,25 +435,25 @@ void syncFixUp(OperationContext* txn,
{
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_X);
- Database* db = dbHolder().openDb(txn, nss.db().toString());
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_X);
+ Database* db = dbHolder().openDb(opCtx, nss.db().toString());
invariant(db);
- WriteUnitOfWork wunit(txn);
- fassertStatusOK(40359, db->dropCollectionEvenIfSystem(txn, nss));
+ WriteUnitOfWork wunit(opCtx);
+ fassertStatusOK(40359, db->dropCollectionEvenIfSystem(opCtx, nss));
wunit.commit();
}
- rollbackSource.copyCollectionFromRemote(txn, nss);
+ rollbackSource.copyCollectionFromRemote(opCtx, nss);
}
for (const string& ns : fixUpInfo.collectionsToResyncMetadata) {
log() << "rollback 4.1.2 coll metadata resync " << ns;
const NamespaceString nss(ns);
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_X);
- auto db = dbHolder().openDb(txn, nss.db().toString());
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_X);
+ auto db = dbHolder().openDb(opCtx, nss.db().toString());
invariant(db);
auto collection = db->getCollection(ns);
invariant(collection);
@@ -490,23 +490,23 @@ void syncFixUp(OperationContext* txn,
// Use default options.
}
- WriteUnitOfWork wuow(txn);
- if (options.flagsSet || cce->getCollectionOptions(txn).flagsSet) {
- cce->updateFlags(txn, options.flags);
+ WriteUnitOfWork wuow(opCtx);
+ if (options.flagsSet || cce->getCollectionOptions(opCtx).flagsSet) {
+ cce->updateFlags(opCtx, options.flags);
}
- auto status = collection->setValidator(txn, options.validator);
+ auto status = collection->setValidator(opCtx, options.validator);
if (!status.isOK()) {
throw RSFatalException(str::stream() << "Failed to set validator: "
<< status.toString());
}
- status = collection->setValidationAction(txn, options.validationAction);
+ status = collection->setValidationAction(opCtx, options.validationAction);
if (!status.isOK()) {
throw RSFatalException(str::stream() << "Failed to set validationAction: "
<< status.toString());
}
- status = collection->setValidationLevel(txn, options.validationLevel);
+ status = collection->setValidationLevel(opCtx, options.validationLevel);
if (!status.isOK()) {
throw RSFatalException(str::stream() << "Failed to set validationLevel: "
<< status.toString());
@@ -518,7 +518,7 @@ void syncFixUp(OperationContext* txn,
// we did more reading from primary, so check it again for a rollback (which would mess
// us up), and make minValid newer.
log() << "rollback 4.2";
- checkRbidAndUpdateMinValid(txn, fixUpInfo.rbid, rollbackSource);
+ checkRbidAndUpdateMinValid(opCtx, fixUpInfo.rbid, rollbackSource);
}
log() << "rollback 4.6";
@@ -530,16 +530,16 @@ void syncFixUp(OperationContext* txn,
invariant(!fixUpInfo.indexesToDrop.count(*it));
- ScopedTransaction transaction(txn, MODE_IX);
+ ScopedTransaction transaction(opCtx, MODE_IX);
const NamespaceString nss(*it);
- Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_X);
- Database* db = dbHolder().get(txn, nsToDatabaseSubstring(*it));
+ Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_X);
+ Database* db = dbHolder().get(opCtx, nsToDatabaseSubstring(*it));
if (db) {
Helpers::RemoveSaver removeSaver("rollback", "", *it);
// perform a collection scan and write all documents in the collection to disk
std::unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(
- txn, *it, db->getCollection(*it), PlanExecutor::YIELD_AUTO));
+ opCtx, *it, db->getCollection(*it), PlanExecutor::YIELD_AUTO));
BSONObj curObj;
PlanExecutor::ExecState execState;
while (PlanExecutor::ADVANCED == (execState = exec->getNext(&curObj, NULL))) {
@@ -564,8 +564,8 @@ void syncFixUp(OperationContext* txn,
throw RSFatalException();
}
- WriteUnitOfWork wunit(txn);
- fassertStatusOK(40360, db->dropCollectionEvenIfSystem(txn, nss));
+ WriteUnitOfWork wunit(opCtx);
+ fassertStatusOK(40360, db->dropCollectionEvenIfSystem(opCtx, nss));
wunit.commit();
}
}
@@ -576,9 +576,9 @@ void syncFixUp(OperationContext* txn,
const string& indexName = it->second;
log() << "rollback drop index: collection: " << nss.toString() << ". index: " << indexName;
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_X);
- auto db = dbHolder().get(txn, nss.db());
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_X);
+ auto db = dbHolder().get(opCtx, nss.db());
if (!db) {
continue;
}
@@ -592,14 +592,14 @@ void syncFixUp(OperationContext* txn,
}
bool includeUnfinishedIndexes = false;
auto indexDescriptor =
- indexCatalog->findIndexByName(txn, indexName, includeUnfinishedIndexes);
+ indexCatalog->findIndexByName(opCtx, indexName, includeUnfinishedIndexes);
if (!indexDescriptor) {
warning() << "rollback failed to drop index " << indexName << " in " << nss.toString()
<< ": index not found";
continue;
}
- WriteUnitOfWork wunit(txn);
- auto status = indexCatalog->dropIndex(txn, indexDescriptor);
+ WriteUnitOfWork wunit(opCtx);
+ auto status = indexCatalog->dropIndex(opCtx, indexDescriptor);
if (!status.isOK()) {
severe() << "rollback failed to drop index " << indexName << " in " << nss.toString()
<< ": " << status;
@@ -637,9 +637,9 @@ void syncFixUp(OperationContext* txn,
// TODO: Lots of overhead in context. This can be faster.
const NamespaceString docNss(doc.ns);
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock docDbLock(txn->lockState(), docNss.db(), MODE_X);
- OldClientContext ctx(txn, doc.ns);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock docDbLock(opCtx->lockState(), docNss.db(), MODE_X);
+ OldClientContext ctx(opCtx, doc.ns);
Collection* collection = ctx.db()->getCollection(doc.ns);
@@ -651,7 +651,7 @@ void syncFixUp(OperationContext* txn,
// createCollection command and regardless, the document no longer exists.
if (collection && removeSaver) {
BSONObj obj;
- bool found = Helpers::findOne(txn, collection, pattern, obj, false);
+ bool found = Helpers::findOne(opCtx, collection, pattern, obj, false);
if (found) {
auto status = removeSaver->goingToDelete(obj);
if (!status.isOK()) {
@@ -680,9 +680,9 @@ void syncFixUp(OperationContext* txn,
// TODO: IIRC cappedTruncateAfter does not handle completely
// empty.
// this will crazy slow if no _id index.
- const auto clock = txn->getServiceContext()->getFastClockSource();
+ const auto clock = opCtx->getServiceContext()->getFastClockSource();
const auto findOneStart = clock->now();
- RecordId loc = Helpers::findOne(txn, collection, pattern, false);
+ RecordId loc = Helpers::findOne(opCtx, collection, pattern, false);
if (clock->now() - findOneStart > Milliseconds(200))
warning() << "roll back slow no _id index for " << doc.ns
<< " perhaps?";
@@ -690,17 +690,17 @@ void syncFixUp(OperationContext* txn,
// RecordId loc = Helpers::findById(nsd, pattern);
if (!loc.isNull()) {
try {
- collection->cappedTruncateAfter(txn, loc, true);
+ collection->cappedTruncateAfter(opCtx, loc, true);
} catch (const DBException& e) {
if (e.getCode() == 13415) {
// hack: need to just make cappedTruncate do this...
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
- uassertStatusOK(collection->truncate(txn));
+ WriteUnitOfWork wunit(opCtx);
+ uassertStatusOK(collection->truncate(opCtx));
wunit.commit();
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
- txn, "truncate", collection->ns().ns());
+ opCtx, "truncate", collection->ns().ns());
} else {
throw e;
}
@@ -717,7 +717,7 @@ void syncFixUp(OperationContext* txn,
<< ": " << redact(e);
}
} else {
- deleteObjects(txn,
+ deleteObjects(opCtx,
collection,
doc.ns,
pattern,
@@ -740,7 +740,7 @@ void syncFixUp(OperationContext* txn,
UpdateLifecycleImpl updateLifecycle(requestNs);
request.setLifecycle(&updateLifecycle);
- update(txn, ctx.db(), request);
+ update(opCtx, ctx.db(), request);
}
} catch (const DBException& e) {
log() << "exception in rollback ns:" << doc.ns << ' ' << pattern.toString() << ' '
@@ -757,10 +757,10 @@ void syncFixUp(OperationContext* txn,
LOG(2) << "rollback truncate oplog after " << fixUpInfo.commonPoint.toString();
{
const NamespaceString oplogNss(rsOplogName);
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock oplogDbLock(txn->lockState(), oplogNss.db(), MODE_IX);
- Lock::CollectionLock oplogCollectionLoc(txn->lockState(), oplogNss.ns(), MODE_X);
- OldClientContext ctx(txn, rsOplogName);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock oplogDbLock(opCtx->lockState(), oplogNss.db(), MODE_IX);
+ Lock::CollectionLock oplogCollectionLoc(opCtx->lockState(), oplogNss.ns(), MODE_X);
+ OldClientContext ctx(opCtx, rsOplogName);
Collection* oplogCollection = ctx.db()->getCollection(rsOplogName);
if (!oplogCollection) {
fassertFailedWithStatusNoTrace(13423,
@@ -768,10 +768,10 @@ void syncFixUp(OperationContext* txn,
str::stream() << "Can't find " << rsOplogName));
}
// TODO: fatal error if this throws?
- oplogCollection->cappedTruncateAfter(txn, fixUpInfo.commonPointOurDiskloc, false);
+ oplogCollection->cappedTruncateAfter(opCtx, fixUpInfo.commonPointOurDiskloc, false);
}
- Status status = getGlobalAuthorizationManager()->initialize(txn);
+ Status status = getGlobalAuthorizationManager()->initialize(opCtx);
if (!status.isOK()) {
severe() << "Failed to reinitialize auth data after rollback: " << status;
fassertFailedNoTrace(40366);
@@ -779,16 +779,16 @@ void syncFixUp(OperationContext* txn,
// Reload the lastAppliedOpTime and lastDurableOpTime value in the replcoord and the
// lastAppliedHash value in bgsync to reflect our new last op.
- replCoord->resetLastOpTimesFromOplog(txn);
+ replCoord->resetLastOpTimesFromOplog(opCtx);
log() << "rollback done";
}
-Status _syncRollback(OperationContext* txn,
+Status _syncRollback(OperationContext* opCtx,
const OplogInterface& localOplog,
const RollbackSource& rollbackSource,
boost::optional<int> requiredRBID,
ReplicationCoordinator* replCoord) {
- invariant(!txn->lockState()->isLocked());
+ invariant(!opCtx->lockState()->isLocked());
FixUpInfo how;
log() << "rollback 1";
@@ -833,7 +833,7 @@ Status _syncRollback(OperationContext* txn,
log() << "rollback 3 fixup";
try {
ON_BLOCK_EXIT([&] { replCoord->incrementRollbackID(); });
- syncFixUp(txn, how, rollbackSource, replCoord);
+ syncFixUp(opCtx, how, rollbackSource, replCoord);
} catch (const RSFatalException& e) {
return Status(ErrorCodes::UnrecoverableRollbackError, e.what(), 18753);
}
@@ -853,19 +853,19 @@ Status _syncRollback(OperationContext* txn,
} // namespace
-Status syncRollback(OperationContext* txn,
+Status syncRollback(OperationContext* opCtx,
const OplogInterface& localOplog,
const RollbackSource& rollbackSource,
boost::optional<int> requiredRBID,
ReplicationCoordinator* replCoord) {
- invariant(txn);
+ invariant(opCtx);
invariant(replCoord);
log() << "beginning rollback" << rsLog;
- DisableDocumentValidation validationDisabler(txn);
- UnreplicatedWritesBlock replicationDisabler(txn);
- Status status = _syncRollback(txn, localOplog, rollbackSource, requiredRBID, replCoord);
+ DisableDocumentValidation validationDisabler(opCtx);
+ UnreplicatedWritesBlock replicationDisabler(opCtx);
+ Status status = _syncRollback(opCtx, localOplog, rollbackSource, requiredRBID, replCoord);
log() << "rollback finished" << rsLog;
return status;
diff --git a/src/mongo/db/repl/rs_rollback.h b/src/mongo/db/repl/rs_rollback.h
index 8ee7dd04367..58dd4a27d4f 100644
--- a/src/mongo/db/repl/rs_rollback.h
+++ b/src/mongo/db/repl/rs_rollback.h
@@ -59,7 +59,7 @@ class RollbackSource;
* This function runs a command on the sync source to detect if the sync source rolls back
* while our rollback is in progress.
*
- * @param txn Used to read and write from this node's databases
+ * @param opCtx Used to read and write from this node's databases
* @param localOplog reads the oplog on this server.
* @param rollbackSource interface for sync source:
* provides oplog; and
@@ -73,7 +73,7 @@ class RollbackSource;
* fatally. All other errors should be considered recoverable regardless of whether reported as a
* status or exception.
*/
-Status syncRollback(OperationContext* txn,
+Status syncRollback(OperationContext* opCtx,
const OplogInterface& localOplog,
const RollbackSource& rollbackSource,
boost::optional<int> requiredRBID,
diff --git a/src/mongo/db/repl/rs_rollback_test.cpp b/src/mongo/db/repl/rs_rollback_test.cpp
index 988fe98face..22e7a677fea 100644
--- a/src/mongo/db/repl/rs_rollback_test.cpp
+++ b/src/mongo/db/repl/rs_rollback_test.cpp
@@ -79,7 +79,7 @@ class ReplicationCoordinatorRollbackMock : public ReplicationCoordinatorMock {
public:
ReplicationCoordinatorRollbackMock(ServiceContext* service)
: ReplicationCoordinatorMock(service, createReplSettings()) {}
- void resetLastOpTimesFromOplog(OperationContext* txn) override {}
+ void resetLastOpTimesFromOplog(OperationContext* opCtx) override {}
};
@@ -90,7 +90,8 @@ public:
const OplogInterface& getOplog() const override;
BSONObj getLastOperation() const override;
BSONObj findOne(const NamespaceString& nss, const BSONObj& filter) const override;
- void copyCollectionFromRemote(OperationContext* txn, const NamespaceString& nss) const override;
+ void copyCollectionFromRemote(OperationContext* opCtx,
+ const NamespaceString& nss) const override;
StatusWith<BSONObj> getCollectionInfo(const NamespaceString& nss) const override;
private:
@@ -119,7 +120,7 @@ BSONObj RollbackSourceMock::findOne(const NamespaceString& nss, const BSONObj& f
return BSONObj();
}
-void RollbackSourceMock::copyCollectionFromRemote(OperationContext* txn,
+void RollbackSourceMock::copyCollectionFromRemote(OperationContext* opCtx,
const NamespaceString& nss) const {}
StatusWith<BSONObj> RollbackSourceMock::getCollectionInfo(const NamespaceString& nss) const {
@@ -128,7 +129,7 @@ StatusWith<BSONObj> RollbackSourceMock::getCollectionInfo(const NamespaceString&
class RSRollbackTest : public ServiceContextMongoDTest {
protected:
- ServiceContext::UniqueOperationContext _txn;
+ ServiceContext::UniqueOperationContext _opCtx;
// Owned by service context
ReplicationCoordinator* _coordinator;
@@ -140,8 +141,8 @@ private:
void RSRollbackTest::setUp() {
ServiceContextMongoDTest::setUp();
- _txn = cc().makeOperationContext();
- _coordinator = new ReplicationCoordinatorRollbackMock(_txn->getServiceContext());
+ _opCtx = cc().makeOperationContext();
+ _coordinator = new ReplicationCoordinatorRollbackMock(_opCtx->getServiceContext());
auto serviceContext = getServiceContext();
ReplicationCoordinator::set(serviceContext,
@@ -149,22 +150,22 @@ void RSRollbackTest::setUp() {
StorageInterface::set(serviceContext, stdx::make_unique<StorageInterfaceMock>());
setOplogCollectionName();
- repl::StorageInterface::get(_txn.get())->setAppliedThrough(_txn.get(), OpTime{});
- repl::StorageInterface::get(_txn.get())->setMinValid(_txn.get(), OpTime{});
+ repl::StorageInterface::get(_opCtx.get())->setAppliedThrough(_opCtx.get(), OpTime{});
+ repl::StorageInterface::get(_opCtx.get())->setMinValid(_opCtx.get(), OpTime{});
}
void RSRollbackTest::tearDown() {
- _txn.reset();
+ _opCtx.reset();
ServiceContextMongoDTest::tearDown();
setGlobalReplicationCoordinator(nullptr);
}
TEST_F(RSRollbackTest, InconsistentMinValid) {
- repl::StorageInterface::get(_txn.get())
- ->setAppliedThrough(_txn.get(), OpTime(Timestamp(Seconds(0), 0), 0));
- repl::StorageInterface::get(_txn.get())
- ->setMinValid(_txn.get(), OpTime(Timestamp(Seconds(1), 0), 0));
- auto status = syncRollback(_txn.get(),
+ repl::StorageInterface::get(_opCtx.get())
+ ->setAppliedThrough(_opCtx.get(), OpTime(Timestamp(Seconds(0), 0), 0));
+ repl::StorageInterface::get(_opCtx.get())
+ ->setMinValid(_opCtx.get(), OpTime(Timestamp(Seconds(1), 0), 0));
+ auto status = syncRollback(_opCtx.get(),
OplogInterfaceMock(kEmptyMockOperations),
RollbackSourceMock(std::unique_ptr<OplogInterface>(
new OplogInterfaceMock(kEmptyMockOperations))),
@@ -180,7 +181,7 @@ TEST_F(RSRollbackTest, OplogStartMissing) {
std::make_pair(BSON("ts" << ts.getTimestamp() << "h" << ts.getTerm()), RecordId());
ASSERT_EQUALS(
ErrorCodes::OplogStartMissing,
- syncRollback(_txn.get(),
+ syncRollback(_opCtx.get(),
OplogInterfaceMock(kEmptyMockOperations),
RollbackSourceMock(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
operation,
@@ -194,7 +195,7 @@ TEST_F(RSRollbackTest, NoRemoteOpLog) {
OpTime ts(Timestamp(Seconds(1), 0), 0);
auto operation =
std::make_pair(BSON("ts" << ts.getTimestamp() << "h" << ts.getTerm()), RecordId());
- auto status = syncRollback(_txn.get(),
+ auto status = syncRollback(_opCtx.get(),
OplogInterfaceMock({operation}),
RollbackSourceMock(std::unique_ptr<OplogInterface>(
new OplogInterfaceMock(kEmptyMockOperations))),
@@ -216,7 +217,7 @@ TEST_F(RSRollbackTest, RemoteGetRollbackIdThrows) {
uassert(ErrorCodes::UnknownError, "getRollbackId() failed", false);
}
};
- ASSERT_THROWS_CODE(syncRollback(_txn.get(),
+ ASSERT_THROWS_CODE(syncRollback(_opCtx.get(),
OplogInterfaceMock({operation}),
RollbackSourceLocal(std::unique_ptr<OplogInterface>(
new OplogInterfaceMock(kEmptyMockOperations))),
@@ -239,7 +240,7 @@ TEST_F(RSRollbackTest, RemoteGetRollbackIdDiffersFromRequiredRBID) {
}
};
- ASSERT_THROWS_CODE(syncRollback(_txn.get(),
+ ASSERT_THROWS_CODE(syncRollback(_opCtx.get(),
OplogInterfaceMock({operation}),
RollbackSourceLocal(std::unique_ptr<OplogInterface>(
new OplogInterfaceMock(kEmptyMockOperations))),
@@ -250,12 +251,12 @@ TEST_F(RSRollbackTest, RemoteGetRollbackIdDiffersFromRequiredRBID) {
}
TEST_F(RSRollbackTest, BothOplogsAtCommonPoint) {
- createOplog(_txn.get());
+ createOplog(_opCtx.get());
OpTime ts(Timestamp(Seconds(1), 0), 1);
auto operation =
std::make_pair(BSON("ts" << ts.getTimestamp() << "h" << ts.getTerm()), RecordId(1));
ASSERT_OK(
- syncRollback(_txn.get(),
+ syncRollback(_opCtx.get(),
OplogInterfaceMock({operation}),
RollbackSourceMock(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
operation,
@@ -268,24 +269,24 @@ TEST_F(RSRollbackTest, BothOplogsAtCommonPoint) {
* Create test collection.
* Returns collection.
*/
-Collection* _createCollection(OperationContext* txn,
+Collection* _createCollection(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionOptions& options) {
- Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_X);
- mongo::WriteUnitOfWork wuow(txn);
- auto db = dbHolder().openDb(txn, nss.db());
+ Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_X);
+ mongo::WriteUnitOfWork wuow(opCtx);
+ auto db = dbHolder().openDb(opCtx, nss.db());
ASSERT_TRUE(db);
- db->dropCollection(txn, nss.ns());
- auto coll = db->createCollection(txn, nss.ns(), options);
+ db->dropCollection(opCtx, nss.ns());
+ auto coll = db->createCollection(opCtx, nss.ns(), options);
ASSERT_TRUE(coll);
wuow.commit();
return coll;
}
-Collection* _createCollection(OperationContext* txn,
+Collection* _createCollection(OperationContext* opCtx,
const std::string& nss,
const CollectionOptions& options) {
- return _createCollection(txn, NamespaceString(nss), options);
+ return _createCollection(opCtx, NamespaceString(nss), options);
}
/**
@@ -293,7 +294,7 @@ Collection* _createCollection(OperationContext* txn,
* Returns number of records in collection after rolling back delete operation.
* If collection does not exist after rolling back, returns -1.
*/
-int _testRollbackDelete(OperationContext* txn,
+int _testRollbackDelete(OperationContext* opCtx,
ReplicationCoordinator* coordinator,
const BSONObj& documentAtSource) {
auto commonOperation =
@@ -325,54 +326,54 @@ int _testRollbackDelete(OperationContext* txn,
std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
commonOperation,
})));
- ASSERT_OK(syncRollback(txn,
+ ASSERT_OK(syncRollback(opCtx,
OplogInterfaceMock({deleteOperation, commonOperation}),
rollbackSource,
{},
coordinator));
ASSERT_TRUE(rollbackSource.called);
- Lock::DBLock dbLock(txn->lockState(), "test", MODE_S);
- Lock::CollectionLock collLock(txn->lockState(), "test.t", MODE_S);
- auto db = dbHolder().get(txn, "test");
+ Lock::DBLock dbLock(opCtx->lockState(), "test", MODE_S);
+ Lock::CollectionLock collLock(opCtx->lockState(), "test.t", MODE_S);
+ auto db = dbHolder().get(opCtx, "test");
ASSERT_TRUE(db);
auto collection = db->getCollection("test.t");
if (!collection) {
return -1;
}
- return collection->getRecordStore()->numRecords(txn);
+ return collection->getRecordStore()->numRecords(opCtx);
}
TEST_F(RSRollbackTest, RollbackDeleteNoDocumentAtSourceCollectionDoesNotExist) {
- createOplog(_txn.get());
- ASSERT_EQUALS(-1, _testRollbackDelete(_txn.get(), _coordinator, BSONObj()));
+ createOplog(_opCtx.get());
+ ASSERT_EQUALS(-1, _testRollbackDelete(_opCtx.get(), _coordinator, BSONObj()));
}
TEST_F(RSRollbackTest, RollbackDeleteNoDocumentAtSourceCollectionExistsNonCapped) {
- createOplog(_txn.get());
- _createCollection(_txn.get(), "test.t", CollectionOptions());
- _testRollbackDelete(_txn.get(), _coordinator, BSONObj());
- ASSERT_EQUALS(0, _testRollbackDelete(_txn.get(), _coordinator, BSONObj()));
+ createOplog(_opCtx.get());
+ _createCollection(_opCtx.get(), "test.t", CollectionOptions());
+ _testRollbackDelete(_opCtx.get(), _coordinator, BSONObj());
+ ASSERT_EQUALS(0, _testRollbackDelete(_opCtx.get(), _coordinator, BSONObj()));
}
TEST_F(RSRollbackTest, RollbackDeleteNoDocumentAtSourceCollectionExistsCapped) {
- createOplog(_txn.get());
+ createOplog(_opCtx.get());
CollectionOptions options;
options.capped = true;
- _createCollection(_txn.get(), "test.t", options);
- ASSERT_EQUALS(0, _testRollbackDelete(_txn.get(), _coordinator, BSONObj()));
+ _createCollection(_opCtx.get(), "test.t", options);
+ ASSERT_EQUALS(0, _testRollbackDelete(_opCtx.get(), _coordinator, BSONObj()));
}
TEST_F(RSRollbackTest, RollbackDeleteRestoreDocument) {
- createOplog(_txn.get());
- _createCollection(_txn.get(), "test.t", CollectionOptions());
+ createOplog(_opCtx.get());
+ _createCollection(_opCtx.get(), "test.t", CollectionOptions());
BSONObj doc = BSON("_id" << 0 << "a" << 1);
- _testRollbackDelete(_txn.get(), _coordinator, doc);
- ASSERT_EQUALS(1, _testRollbackDelete(_txn.get(), _coordinator, doc));
+ _testRollbackDelete(_opCtx.get(), _coordinator, doc);
+ ASSERT_EQUALS(1, _testRollbackDelete(_opCtx.get(), _coordinator, doc));
}
TEST_F(RSRollbackTest, RollbackInsertDocumentWithNoId) {
- createOplog(_txn.get());
+ createOplog(_opCtx.get());
auto commonOperation =
std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1));
auto insertDocumentOperation =
@@ -400,7 +401,7 @@ TEST_F(RSRollbackTest, RollbackInsertDocumentWithNoId) {
commonOperation,
})));
startCapturingLogMessages();
- auto status = syncRollback(_txn.get(),
+ auto status = syncRollback(_opCtx.get(),
OplogInterfaceMock({insertDocumentOperation, commonOperation}),
rollbackSource,
{},
@@ -413,8 +414,8 @@ TEST_F(RSRollbackTest, RollbackInsertDocumentWithNoId) {
}
TEST_F(RSRollbackTest, RollbackCreateIndexCommand) {
- createOplog(_txn.get());
- auto collection = _createCollection(_txn.get(), "test.t", CollectionOptions());
+ createOplog(_opCtx.get());
+ auto collection = _createCollection(_opCtx.get(), "test.t", CollectionOptions());
auto indexSpec = BSON("ns"
<< "test.t"
<< "key"
@@ -424,15 +425,15 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommand) {
<< "v"
<< static_cast<int>(kIndexVersion));
{
- Lock::DBLock dbLock(_txn->lockState(), "test", MODE_X);
- MultiIndexBlock indexer(_txn.get(), collection);
+ Lock::DBLock dbLock(_opCtx->lockState(), "test", MODE_X);
+ MultiIndexBlock indexer(_opCtx.get(), collection);
ASSERT_OK(indexer.init(indexSpec).getStatus());
- WriteUnitOfWork wunit(_txn.get());
+ WriteUnitOfWork wunit(_opCtx.get());
indexer.commit();
wunit.commit();
auto indexCatalog = collection->getIndexCatalog();
ASSERT(indexCatalog);
- ASSERT_EQUALS(2, indexCatalog->numIndexesReady(_txn.get()));
+ ASSERT_EQUALS(2, indexCatalog->numIndexesReady(_opCtx.get()));
}
auto commonOperation =
std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1));
@@ -448,7 +449,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommand) {
public:
RollbackSourceLocal(std::unique_ptr<OplogInterface> oplog)
: RollbackSourceMock(std::move(oplog)), called(false) {}
- void copyCollectionFromRemote(OperationContext* txn,
+ void copyCollectionFromRemote(OperationContext* opCtx,
const NamespaceString& nss) const override {
called = true;
}
@@ -464,7 +465,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommand) {
// This can happen when an index is re-created with different options.
startCapturingLogMessages();
ASSERT_OK(syncRollback(
- _txn.get(),
+ _opCtx.get(),
OplogInterfaceMock({insertDocumentOperation, insertDocumentOperation, commonOperation}),
rollbackSource,
{},
@@ -474,16 +475,16 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommand) {
countLogLinesContaining("rollback drop index: collection: test.t. index: a_1"));
ASSERT_FALSE(rollbackSource.called);
{
- Lock::DBLock dbLock(_txn->lockState(), "test", MODE_S);
+ Lock::DBLock dbLock(_opCtx->lockState(), "test", MODE_S);
auto indexCatalog = collection->getIndexCatalog();
ASSERT(indexCatalog);
- ASSERT_EQUALS(1, indexCatalog->numIndexesReady(_txn.get()));
+ ASSERT_EQUALS(1, indexCatalog->numIndexesReady(_opCtx.get()));
}
}
TEST_F(RSRollbackTest, RollbackCreateIndexCommandIndexNotInCatalog) {
- createOplog(_txn.get());
- auto collection = _createCollection(_txn.get(), "test.t", CollectionOptions());
+ createOplog(_opCtx.get());
+ auto collection = _createCollection(_opCtx.get(), "test.t", CollectionOptions());
auto indexSpec = BSON("ns"
<< "test.t"
<< "key"
@@ -492,10 +493,10 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandIndexNotInCatalog) {
<< "a_1");
// Skip index creation to trigger warning during rollback.
{
- Lock::DBLock dbLock(_txn->lockState(), "test", MODE_S);
+ Lock::DBLock dbLock(_opCtx->lockState(), "test", MODE_S);
auto indexCatalog = collection->getIndexCatalog();
ASSERT(indexCatalog);
- ASSERT_EQUALS(1, indexCatalog->numIndexesReady(_txn.get()));
+ ASSERT_EQUALS(1, indexCatalog->numIndexesReady(_opCtx.get()));
}
auto commonOperation =
std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1));
@@ -511,7 +512,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandIndexNotInCatalog) {
public:
RollbackSourceLocal(std::unique_ptr<OplogInterface> oplog)
: RollbackSourceMock(std::move(oplog)), called(false) {}
- void copyCollectionFromRemote(OperationContext* txn,
+ void copyCollectionFromRemote(OperationContext* opCtx,
const NamespaceString& nss) const override {
called = true;
}
@@ -524,7 +525,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandIndexNotInCatalog) {
commonOperation,
})));
startCapturingLogMessages();
- ASSERT_OK(syncRollback(_txn.get(),
+ ASSERT_OK(syncRollback(_opCtx.get(),
OplogInterfaceMock({insertDocumentOperation, commonOperation}),
rollbackSource,
{},
@@ -535,15 +536,15 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandIndexNotInCatalog) {
ASSERT_EQUALS(1, countLogLinesContaining("rollback failed to drop index a_1 in test.t"));
ASSERT_FALSE(rollbackSource.called);
{
- Lock::DBLock dbLock(_txn->lockState(), "test", MODE_S);
+ Lock::DBLock dbLock(_opCtx->lockState(), "test", MODE_S);
auto indexCatalog = collection->getIndexCatalog();
ASSERT(indexCatalog);
- ASSERT_EQUALS(1, indexCatalog->numIndexesReady(_txn.get()));
+ ASSERT_EQUALS(1, indexCatalog->numIndexesReady(_opCtx.get()));
}
}
TEST_F(RSRollbackTest, RollbackCreateIndexCommandMissingNamespace) {
- createOplog(_txn.get());
+ createOplog(_opCtx.get());
auto commonOperation =
std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1));
auto insertDocumentOperation =
@@ -559,7 +560,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandMissingNamespace) {
public:
RollbackSourceLocal(std::unique_ptr<OplogInterface> oplog)
: RollbackSourceMock(std::move(oplog)), called(false) {}
- void copyCollectionFromRemote(OperationContext* txn,
+ void copyCollectionFromRemote(OperationContext* opCtx,
const NamespaceString& nss) const override {
called = true;
}
@@ -572,7 +573,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandMissingNamespace) {
commonOperation,
})));
startCapturingLogMessages();
- auto status = syncRollback(_txn.get(),
+ auto status = syncRollback(_opCtx.get(),
OplogInterfaceMock({insertDocumentOperation, commonOperation}),
rollbackSource,
{},
@@ -586,7 +587,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandMissingNamespace) {
}
TEST_F(RSRollbackTest, RollbackCreateIndexCommandInvalidNamespace) {
- createOplog(_txn.get());
+ createOplog(_opCtx.get());
auto commonOperation =
std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1));
auto insertDocumentOperation =
@@ -606,7 +607,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandInvalidNamespace) {
public:
RollbackSourceLocal(std::unique_ptr<OplogInterface> oplog)
: RollbackSourceMock(std::move(oplog)), called(false) {}
- void copyCollectionFromRemote(OperationContext* txn,
+ void copyCollectionFromRemote(OperationContext* opCtx,
const NamespaceString& nss) const override {
called = true;
}
@@ -619,7 +620,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandInvalidNamespace) {
commonOperation,
})));
startCapturingLogMessages();
- auto status = syncRollback(_txn.get(),
+ auto status = syncRollback(_opCtx.get(),
OplogInterfaceMock({insertDocumentOperation, commonOperation}),
rollbackSource,
{},
@@ -633,7 +634,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandInvalidNamespace) {
}
TEST_F(RSRollbackTest, RollbackCreateIndexCommandMissingIndexName) {
- createOplog(_txn.get());
+ createOplog(_opCtx.get());
auto commonOperation =
std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1));
auto insertDocumentOperation =
@@ -651,7 +652,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandMissingIndexName) {
public:
RollbackSourceLocal(std::unique_ptr<OplogInterface> oplog)
: RollbackSourceMock(std::move(oplog)), called(false) {}
- void copyCollectionFromRemote(OperationContext* txn,
+ void copyCollectionFromRemote(OperationContext* opCtx,
const NamespaceString& nss) const override {
called = true;
}
@@ -664,7 +665,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandMissingIndexName) {
commonOperation,
})));
startCapturingLogMessages();
- auto status = syncRollback(_txn.get(),
+ auto status = syncRollback(_opCtx.get(),
OplogInterfaceMock({insertDocumentOperation, commonOperation}),
rollbackSource,
{},
@@ -677,7 +678,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandMissingIndexName) {
}
TEST_F(RSRollbackTest, RollbackUnknownCommand) {
- createOplog(_txn.get());
+ createOplog(_opCtx.get());
auto commonOperation =
std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1));
auto unknownCommandOperation =
@@ -690,15 +691,15 @@ TEST_F(RSRollbackTest, RollbackUnknownCommand) {
<< "t")),
RecordId(2));
{
- Lock::DBLock dbLock(_txn->lockState(), "test", MODE_X);
- mongo::WriteUnitOfWork wuow(_txn.get());
- auto db = dbHolder().openDb(_txn.get(), "test");
+ Lock::DBLock dbLock(_opCtx->lockState(), "test", MODE_X);
+ mongo::WriteUnitOfWork wuow(_opCtx.get());
+ auto db = dbHolder().openDb(_opCtx.get(), "test");
ASSERT_TRUE(db);
- ASSERT_TRUE(db->getOrCreateCollection(_txn.get(), "test.t"));
+ ASSERT_TRUE(db->getOrCreateCollection(_opCtx.get(), "test.t"));
wuow.commit();
}
auto status =
- syncRollback(_txn.get(),
+ syncRollback(_opCtx.get(),
OplogInterfaceMock({unknownCommandOperation, commonOperation}),
RollbackSourceMock(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
commonOperation,
@@ -710,7 +711,7 @@ TEST_F(RSRollbackTest, RollbackUnknownCommand) {
}
TEST_F(RSRollbackTest, RollbackDropCollectionCommand) {
- createOplog(_txn.get());
+ createOplog(_opCtx.get());
auto commonOperation =
std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1));
auto dropCollectionOperation =
@@ -726,7 +727,7 @@ TEST_F(RSRollbackTest, RollbackDropCollectionCommand) {
public:
RollbackSourceLocal(std::unique_ptr<OplogInterface> oplog)
: RollbackSourceMock(std::move(oplog)), called(false) {}
- void copyCollectionFromRemote(OperationContext* txn,
+ void copyCollectionFromRemote(OperationContext* opCtx,
const NamespaceString& nss) const override {
called = true;
}
@@ -735,8 +736,8 @@ TEST_F(RSRollbackTest, RollbackDropCollectionCommand) {
RollbackSourceLocal rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
commonOperation,
})));
- _createCollection(_txn.get(), "test.t", CollectionOptions());
- ASSERT_OK(syncRollback(_txn.get(),
+ _createCollection(_opCtx.get(), "test.t", CollectionOptions());
+ ASSERT_OK(syncRollback(_opCtx.get(),
OplogInterfaceMock({dropCollectionOperation, commonOperation}),
rollbackSource,
{},
@@ -745,7 +746,7 @@ TEST_F(RSRollbackTest, RollbackDropCollectionCommand) {
}
TEST_F(RSRollbackTest, RollbackDropCollectionCommandFailsIfRBIDChangesWhileSyncingCollection) {
- createOplog(_txn.get());
+ createOplog(_opCtx.get());
auto commonOperation =
std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1));
auto dropCollectionOperation =
@@ -763,7 +764,7 @@ TEST_F(RSRollbackTest, RollbackDropCollectionCommandFailsIfRBIDChangesWhileSynci
int getRollbackId() const override {
return copyCollectionCalled ? 1 : 0;
}
- void copyCollectionFromRemote(OperationContext* txn,
+ void copyCollectionFromRemote(OperationContext* opCtx,
const NamespaceString& nss) const override {
copyCollectionCalled = true;
}
@@ -773,8 +774,8 @@ TEST_F(RSRollbackTest, RollbackDropCollectionCommandFailsIfRBIDChangesWhileSynci
commonOperation,
})));
- _createCollection(_txn.get(), "test.t", CollectionOptions());
- ASSERT_THROWS_CODE(syncRollback(_txn.get(),
+ _createCollection(_opCtx.get(), "test.t", CollectionOptions());
+ ASSERT_THROWS_CODE(syncRollback(_opCtx.get(),
OplogInterfaceMock({dropCollectionOperation, commonOperation}),
rollbackSource,
{0},
@@ -815,22 +816,22 @@ OpTime getOpTimeFromOplogEntry(const BSONObj& entry) {
}
TEST_F(RSRollbackTest, RollbackApplyOpsCommand) {
- createOplog(_txn.get());
+ createOplog(_opCtx.get());
{
- AutoGetOrCreateDb autoDb(_txn.get(), "test", MODE_X);
- mongo::WriteUnitOfWork wuow(_txn.get());
+ AutoGetOrCreateDb autoDb(_opCtx.get(), "test", MODE_X);
+ mongo::WriteUnitOfWork wuow(_opCtx.get());
auto coll = autoDb.getDb()->getCollection("test.t");
if (!coll) {
- coll = autoDb.getDb()->createCollection(_txn.get(), "test.t");
+ coll = autoDb.getDb()->createCollection(_opCtx.get(), "test.t");
}
ASSERT(coll);
OpDebug* const nullOpDebug = nullptr;
ASSERT_OK(
- coll->insertDocument(_txn.get(), BSON("_id" << 1 << "v" << 2), nullOpDebug, false));
+ coll->insertDocument(_opCtx.get(), BSON("_id" << 1 << "v" << 2), nullOpDebug, false));
ASSERT_OK(
- coll->insertDocument(_txn.get(), BSON("_id" << 2 << "v" << 4), nullOpDebug, false));
- ASSERT_OK(coll->insertDocument(_txn.get(), BSON("_id" << 4), nullOpDebug, false));
+ coll->insertDocument(_opCtx.get(), BSON("_id" << 2 << "v" << 4), nullOpDebug, false));
+ ASSERT_OK(coll->insertDocument(_opCtx.get(), BSON("_id" << 4), nullOpDebug, false));
wuow.commit();
}
const auto commonOperation =
@@ -897,8 +898,8 @@ TEST_F(RSRollbackTest, RollbackApplyOpsCommand) {
mutable std::multiset<int> searchedIds;
} rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({commonOperation})));
- _createCollection(_txn.get(), "test.t", CollectionOptions());
- ASSERT_OK(syncRollback(_txn.get(),
+ _createCollection(_opCtx.get(), "test.t", CollectionOptions());
+ ASSERT_OK(syncRollback(_opCtx.get(),
OplogInterfaceMock({applyOpsOperation, commonOperation}),
rollbackSource,
{},
@@ -909,20 +910,20 @@ TEST_F(RSRollbackTest, RollbackApplyOpsCommand) {
ASSERT_EQUALS(1U, rollbackSource.searchedIds.count(3));
ASSERT_EQUALS(1U, rollbackSource.searchedIds.count(4));
- AutoGetCollectionForRead acr(_txn.get(), NamespaceString("test.t"));
+ AutoGetCollectionForRead acr(_opCtx.get(), NamespaceString("test.t"));
BSONObj result;
- ASSERT(Helpers::findOne(_txn.get(), acr.getCollection(), BSON("_id" << 1), result));
+ ASSERT(Helpers::findOne(_opCtx.get(), acr.getCollection(), BSON("_id" << 1), result));
ASSERT_EQUALS(1, result["v"].numberInt()) << result;
- ASSERT(Helpers::findOne(_txn.get(), acr.getCollection(), BSON("_id" << 2), result));
+ ASSERT(Helpers::findOne(_opCtx.get(), acr.getCollection(), BSON("_id" << 2), result));
ASSERT_EQUALS(3, result["v"].numberInt()) << result;
- ASSERT(Helpers::findOne(_txn.get(), acr.getCollection(), BSON("_id" << 3), result));
+ ASSERT(Helpers::findOne(_opCtx.get(), acr.getCollection(), BSON("_id" << 3), result));
ASSERT_EQUALS(5, result["v"].numberInt()) << result;
- ASSERT_FALSE(Helpers::findOne(_txn.get(), acr.getCollection(), BSON("_id" << 4), result))
+ ASSERT_FALSE(Helpers::findOne(_opCtx.get(), acr.getCollection(), BSON("_id" << 4), result))
<< result;
}
TEST_F(RSRollbackTest, RollbackCreateCollectionCommand) {
- createOplog(_txn.get());
+ createOplog(_opCtx.get());
auto commonOperation =
std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1));
auto createCollectionOperation =
@@ -937,22 +938,22 @@ TEST_F(RSRollbackTest, RollbackCreateCollectionCommand) {
RollbackSourceMock rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
commonOperation,
})));
- _createCollection(_txn.get(), "test.t", CollectionOptions());
- ASSERT_OK(syncRollback(_txn.get(),
+ _createCollection(_opCtx.get(), "test.t", CollectionOptions());
+ ASSERT_OK(syncRollback(_opCtx.get(),
OplogInterfaceMock({createCollectionOperation, commonOperation}),
rollbackSource,
{},
_coordinator));
{
- Lock::DBLock dbLock(_txn->lockState(), "test", MODE_S);
- auto db = dbHolder().get(_txn.get(), "test");
+ Lock::DBLock dbLock(_opCtx->lockState(), "test", MODE_S);
+ auto db = dbHolder().get(_opCtx.get(), "test");
ASSERT_TRUE(db);
ASSERT_FALSE(db->getCollection("test.t"));
}
}
TEST_F(RSRollbackTest, RollbackCollectionModificationCommand) {
- createOplog(_txn.get());
+ createOplog(_opCtx.get());
auto commonOperation =
std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1));
auto collectionModificationOperation =
@@ -979,9 +980,9 @@ TEST_F(RSRollbackTest, RollbackCollectionModificationCommand) {
RollbackSourceLocal rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
commonOperation,
})));
- _createCollection(_txn.get(), "test.t", CollectionOptions());
+ _createCollection(_opCtx.get(), "test.t", CollectionOptions());
startCapturingLogMessages();
- ASSERT_OK(syncRollback(_txn.get(),
+ ASSERT_OK(syncRollback(_opCtx.get(),
OplogInterfaceMock({collectionModificationOperation, commonOperation}),
rollbackSource,
{},
@@ -995,7 +996,7 @@ TEST_F(RSRollbackTest, RollbackCollectionModificationCommand) {
}
TEST_F(RSRollbackTest, RollbackCollectionModificationCommandInvalidCollectionOptions) {
- createOplog(_txn.get());
+ createOplog(_opCtx.get());
auto commonOperation =
std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1));
auto collectionModificationOperation =
@@ -1020,9 +1021,9 @@ TEST_F(RSRollbackTest, RollbackCollectionModificationCommandInvalidCollectionOpt
RollbackSourceLocal rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
commonOperation,
})));
- _createCollection(_txn.get(), "test.t", CollectionOptions());
+ _createCollection(_opCtx.get(), "test.t", CollectionOptions());
auto status =
- syncRollback(_txn.get(),
+ syncRollback(_opCtx.get(),
OplogInterfaceMock({collectionModificationOperation, commonOperation}),
rollbackSource,
{},
diff --git a/src/mongo/db/repl/storage_interface.cpp b/src/mongo/db/repl/storage_interface.cpp
index b9a1c25df45..5f9deb71a85 100644
--- a/src/mongo/db/repl/storage_interface.cpp
+++ b/src/mongo/db/repl/storage_interface.cpp
@@ -51,8 +51,8 @@ StorageInterface* StorageInterface::get(ServiceContext& service) {
return getStorageInterface(service).get();
}
-StorageInterface* StorageInterface::get(OperationContext* txn) {
- return get(txn->getClient()->getServiceContext());
+StorageInterface* StorageInterface::get(OperationContext* opCtx) {
+ return get(opCtx->getClient()->getServiceContext());
}
diff --git a/src/mongo/db/repl/storage_interface.h b/src/mongo/db/repl/storage_interface.h
index ae4f6f7eabf..2e87f23537d 100644
--- a/src/mongo/db/repl/storage_interface.h
+++ b/src/mongo/db/repl/storage_interface.h
@@ -91,7 +91,7 @@ public:
// Operation Context binding.
static StorageInterface* get(ServiceContext* service);
static StorageInterface* get(ServiceContext& service);
- static StorageInterface* get(OperationContext* txn);
+ static StorageInterface* get(OperationContext* opCtx);
static void set(ServiceContext* service, std::unique_ptr<StorageInterface> storageInterface);
// Constructor and Destructor.
@@ -105,7 +105,7 @@ public:
/**
* Returns true if initial sync was started but has not not completed.
*/
- virtual bool getInitialSyncFlag(OperationContext* txn) const = 0;
+ virtual bool getInitialSyncFlag(OperationContext* opCtx) const = 0;
/**
* Sets the the initial sync flag to record that initial sync has not completed.
@@ -113,7 +113,7 @@ public:
* This operation is durable and waits for durable writes (which will block on
*journaling/checkpointing).
*/
- virtual void setInitialSyncFlag(OperationContext* txn) = 0;
+ virtual void setInitialSyncFlag(OperationContext* opCtx) = 0;
/**
* Clears the the initial sync flag to record that initial sync has completed.
@@ -121,34 +121,34 @@ public:
* This operation is durable and waits for durable writes (which will block on
*journaling/checkpointing).
*/
- virtual void clearInitialSyncFlag(OperationContext* txn) = 0;
+ virtual void clearInitialSyncFlag(OperationContext* opCtx) = 0;
/**
* The minValid value is the earliest (minimum) Timestamp that must be applied in order to
* consider the dataset consistent.
*/
- virtual void setMinValid(OperationContext* txn, const OpTime& minValid) = 0;
- virtual OpTime getMinValid(OperationContext* txn) const = 0;
+ virtual void setMinValid(OperationContext* opCtx, const OpTime& minValid) = 0;
+ virtual OpTime getMinValid(OperationContext* opCtx) const = 0;
/**
* Sets minValid only if it is not already higher than endOpTime.
* Warning, this compares the term and timestamp independently. Do not use if the current
* minValid could be from the other fork of a rollback.
*/
- virtual void setMinValidToAtLeast(OperationContext* txn, const OpTime& endOpTime) = 0;
+ virtual void setMinValidToAtLeast(OperationContext* opCtx, const OpTime& endOpTime) = 0;
/**
* On startup all oplog entries with a value >= the oplog delete from point should be deleted.
* If null, no documents should be deleted.
*/
- virtual void setOplogDeleteFromPoint(OperationContext* txn, const Timestamp& timestamp) = 0;
- virtual Timestamp getOplogDeleteFromPoint(OperationContext* txn) = 0;
+ virtual void setOplogDeleteFromPoint(OperationContext* opCtx, const Timestamp& timestamp) = 0;
+ virtual Timestamp getOplogDeleteFromPoint(OperationContext* opCtx) = 0;
/**
* The applied through point is a persistent record of where we've applied through. If null, the
* applied through point is the top of the oplog.
*/
- virtual void setAppliedThrough(OperationContext* txn, const OpTime& optime) = 0;
+ virtual void setAppliedThrough(OperationContext* opCtx, const OpTime& optime) = 0;
/**
* You should probably be calling ReplicationCoordinator::getLastAppliedOpTime() instead.
@@ -156,7 +156,7 @@ public:
* This reads the value from storage which isn't always updated when the ReplicationCoordinator
* is.
*/
- virtual OpTime getAppliedThrough(OperationContext* txn) = 0;
+ virtual OpTime getAppliedThrough(OperationContext* opCtx) = 0;
// Collection creation and population for initial sync.
@@ -177,7 +177,7 @@ public:
* NOTE: If the collection doesn't exist, it will not be created, and instead
* an error is returned.
*/
- virtual Status insertDocument(OperationContext* txn,
+ virtual Status insertDocument(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& doc) = 0;
@@ -185,14 +185,14 @@ public:
* Inserts the given documents into the collection.
* It is an error to call this function with an empty set of documents.
*/
- virtual Status insertDocuments(OperationContext* txn,
+ virtual Status insertDocuments(OperationContext* opCtx,
const NamespaceString& nss,
const std::vector<BSONObj>& docs) = 0;
/**
* Creates the initial oplog, errors if it exists.
*/
- virtual Status createOplog(OperationContext* txn, const NamespaceString& nss) = 0;
+ virtual Status createOplog(OperationContext* opCtx, const NamespaceString& nss) = 0;
/**
* Returns the configured maximum size of the oplog.
@@ -200,30 +200,30 @@ public:
* Implementations are allowed to be "fuzzy" and delete documents when the actual size is
* slightly above or below this, so callers should not rely on its exact value.
*/
- virtual StatusWith<size_t> getOplogMaxSize(OperationContext* txn,
+ virtual StatusWith<size_t> getOplogMaxSize(OperationContext* opCtx,
const NamespaceString& nss) = 0;
/**
* Creates a collection.
*/
- virtual Status createCollection(OperationContext* txn,
+ virtual Status createCollection(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionOptions& options) = 0;
/**
* Drops a collection, like the oplog.
*/
- virtual Status dropCollection(OperationContext* txn, const NamespaceString& nss) = 0;
+ virtual Status dropCollection(OperationContext* opCtx, const NamespaceString& nss) = 0;
/**
* Drops all databases except "local".
*/
- virtual Status dropReplicatedDatabases(OperationContext* txn) = 0;
+ virtual Status dropReplicatedDatabases(OperationContext* opCtx) = 0;
/**
* Validates that the admin database is valid during initial sync.
*/
- virtual Status isAdminDbValid(OperationContext* txn) = 0;
+ virtual Status isAdminDbValid(OperationContext* opCtx) = 0;
/**
* Finds at most "limit" documents returned by a collection or index scan on the collection in
@@ -242,7 +242,7 @@ public:
kForward = 1,
kBackward = -1,
};
- virtual StatusWith<std::vector<BSONObj>> findDocuments(OperationContext* txn,
+ virtual StatusWith<std::vector<BSONObj>> findDocuments(OperationContext* opCtx,
const NamespaceString& nss,
boost::optional<StringData> indexName,
ScanDirection scanDirection,
@@ -257,7 +257,7 @@ public:
* will be kept open once this function returns.
* If "indexName" is null, a collection scan is used to locate the document.
*/
- virtual StatusWith<std::vector<BSONObj>> deleteDocuments(OperationContext* txn,
+ virtual StatusWith<std::vector<BSONObj>> deleteDocuments(OperationContext* opCtx,
const NamespaceString& nss,
boost::optional<StringData> indexName,
ScanDirection scanDirection,
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index ce03ab1629c..ff5aa7d4260 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -102,67 +102,67 @@ NamespaceString StorageInterfaceImpl::getMinValidNss() const {
return _minValidNss;
}
-BSONObj StorageInterfaceImpl::getMinValidDocument(OperationContext* txn) const {
+BSONObj StorageInterfaceImpl::getMinValidDocument(OperationContext* opCtx) const {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IS);
- Lock::DBLock dblk(txn->lockState(), _minValidNss.db(), MODE_IS);
- Lock::CollectionLock lk(txn->lockState(), _minValidNss.ns(), MODE_IS);
+ ScopedTransaction transaction(opCtx, MODE_IS);
+ Lock::DBLock dblk(opCtx->lockState(), _minValidNss.db(), MODE_IS);
+ Lock::CollectionLock lk(opCtx->lockState(), _minValidNss.ns(), MODE_IS);
BSONObj doc;
- bool found = Helpers::getSingleton(txn, _minValidNss.ns().c_str(), doc);
+ bool found = Helpers::getSingleton(opCtx, _minValidNss.ns().c_str(), doc);
invariant(found || doc.isEmpty());
return doc;
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
- txn, "StorageInterfaceImpl::getMinValidDocument", _minValidNss.ns());
+ opCtx, "StorageInterfaceImpl::getMinValidDocument", _minValidNss.ns());
MONGO_UNREACHABLE;
}
-void StorageInterfaceImpl::updateMinValidDocument(OperationContext* txn,
+void StorageInterfaceImpl::updateMinValidDocument(OperationContext* opCtx,
const BSONObj& updateSpec) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
+ ScopedTransaction transaction(opCtx, MODE_IX);
// For now this needs to be MODE_X because it sometimes creates the collection.
- Lock::DBLock dblk(txn->lockState(), _minValidNss.db(), MODE_X);
- Helpers::putSingleton(txn, _minValidNss.ns().c_str(), updateSpec);
+ Lock::DBLock dblk(opCtx->lockState(), _minValidNss.db(), MODE_X);
+ Helpers::putSingleton(opCtx, _minValidNss.ns().c_str(), updateSpec);
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
- txn, "StorageInterfaceImpl::updateMinValidDocument", _minValidNss.ns());
+ opCtx, "StorageInterfaceImpl::updateMinValidDocument", _minValidNss.ns());
}
-bool StorageInterfaceImpl::getInitialSyncFlag(OperationContext* txn) const {
- const BSONObj doc = getMinValidDocument(txn);
+bool StorageInterfaceImpl::getInitialSyncFlag(OperationContext* opCtx) const {
+ const BSONObj doc = getMinValidDocument(opCtx);
const auto flag = doc[kInitialSyncFlagFieldName].trueValue();
LOG(3) << "returning initial sync flag value of " << flag;
return flag;
}
-void StorageInterfaceImpl::setInitialSyncFlag(OperationContext* txn) {
+void StorageInterfaceImpl::setInitialSyncFlag(OperationContext* opCtx) {
LOG(3) << "setting initial sync flag";
- updateMinValidDocument(txn, BSON("$set" << kInitialSyncFlag));
- txn->recoveryUnit()->waitUntilDurable();
+ updateMinValidDocument(opCtx, BSON("$set" << kInitialSyncFlag));
+ opCtx->recoveryUnit()->waitUntilDurable();
}
-void StorageInterfaceImpl::clearInitialSyncFlag(OperationContext* txn) {
+void StorageInterfaceImpl::clearInitialSyncFlag(OperationContext* opCtx) {
LOG(3) << "clearing initial sync flag";
- auto replCoord = repl::ReplicationCoordinator::get(txn);
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
OpTime time = replCoord->getMyLastAppliedOpTime();
updateMinValidDocument(
- txn,
+ opCtx,
BSON("$unset" << kInitialSyncFlag << "$set"
<< BSON("ts" << time.getTimestamp() << "t" << time.getTerm()
<< kBeginFieldName
<< time.toBSON())));
if (getGlobalServiceContext()->getGlobalStorageEngine()->isDurable()) {
- txn->recoveryUnit()->waitUntilDurable();
+ opCtx->recoveryUnit()->waitUntilDurable();
replCoord->setMyLastDurableOpTime(time);
}
}
-OpTime StorageInterfaceImpl::getMinValid(OperationContext* txn) const {
- const BSONObj doc = getMinValidDocument(txn);
+OpTime StorageInterfaceImpl::getMinValid(OperationContext* opCtx) const {
+ const BSONObj doc = getMinValidDocument(opCtx);
const auto opTimeStatus = OpTime::parseFromOplogEntry(doc);
// If any of the keys (fields) are missing from the minvalid document, we return
// a null OpTime.
@@ -182,28 +182,29 @@ OpTime StorageInterfaceImpl::getMinValid(OperationContext* txn) const {
return minValid;
}
-void StorageInterfaceImpl::setMinValid(OperationContext* txn, const OpTime& minValid) {
+void StorageInterfaceImpl::setMinValid(OperationContext* opCtx, const OpTime& minValid) {
LOG(3) << "setting minvalid to exactly: " << minValid.toString() << "(" << minValid.toBSON()
<< ")";
updateMinValidDocument(
- txn, BSON("$set" << BSON("ts" << minValid.getTimestamp() << "t" << minValid.getTerm())));
+ opCtx, BSON("$set" << BSON("ts" << minValid.getTimestamp() << "t" << minValid.getTerm())));
}
-void StorageInterfaceImpl::setMinValidToAtLeast(OperationContext* txn, const OpTime& minValid) {
+void StorageInterfaceImpl::setMinValidToAtLeast(OperationContext* opCtx, const OpTime& minValid) {
LOG(3) << "setting minvalid to at least: " << minValid.toString() << "(" << minValid.toBSON()
<< ")";
updateMinValidDocument(
- txn, BSON("$max" << BSON("ts" << minValid.getTimestamp() << "t" << minValid.getTerm())));
+ opCtx, BSON("$max" << BSON("ts" << minValid.getTimestamp() << "t" << minValid.getTerm())));
}
-void StorageInterfaceImpl::setOplogDeleteFromPoint(OperationContext* txn,
+void StorageInterfaceImpl::setOplogDeleteFromPoint(OperationContext* opCtx,
const Timestamp& timestamp) {
LOG(3) << "setting oplog delete from point to: " << timestamp.toStringPretty();
- updateMinValidDocument(txn, BSON("$set" << BSON(kOplogDeleteFromPointFieldName << timestamp)));
+ updateMinValidDocument(opCtx,
+ BSON("$set" << BSON(kOplogDeleteFromPointFieldName << timestamp)));
}
-Timestamp StorageInterfaceImpl::getOplogDeleteFromPoint(OperationContext* txn) {
- const BSONObj doc = getMinValidDocument(txn);
+Timestamp StorageInterfaceImpl::getOplogDeleteFromPoint(OperationContext* opCtx) {
+ const BSONObj doc = getMinValidDocument(opCtx);
Timestamp out = {};
if (auto field = doc[kOplogDeleteFromPointFieldName]) {
out = field.timestamp();
@@ -213,17 +214,17 @@ Timestamp StorageInterfaceImpl::getOplogDeleteFromPoint(OperationContext* txn) {
return out;
}
-void StorageInterfaceImpl::setAppliedThrough(OperationContext* txn, const OpTime& optime) {
+void StorageInterfaceImpl::setAppliedThrough(OperationContext* opCtx, const OpTime& optime) {
LOG(3) << "setting appliedThrough to: " << optime.toString() << "(" << optime.toBSON() << ")";
if (optime.isNull()) {
- updateMinValidDocument(txn, BSON("$unset" << BSON(kBeginFieldName << 1)));
+ updateMinValidDocument(opCtx, BSON("$unset" << BSON(kBeginFieldName << 1)));
} else {
- updateMinValidDocument(txn, BSON("$set" << BSON(kBeginFieldName << optime.toBSON())));
+ updateMinValidDocument(opCtx, BSON("$set" << BSON(kBeginFieldName << optime.toBSON())));
}
}
-OpTime StorageInterfaceImpl::getAppliedThrough(OperationContext* txn) {
- const BSONObj doc = getMinValidDocument(txn);
+OpTime StorageInterfaceImpl::getAppliedThrough(OperationContext* opCtx) {
+ const BSONObj doc = getMinValidDocument(opCtx);
const auto opTimeStatus = OpTime::parseFromOplogEntry(doc.getObjectField(kBeginFieldName));
if (!opTimeStatus.isOK()) {
// Return null OpTime on any parse failure, including if "begin" is missing.
@@ -253,18 +254,18 @@ StorageInterfaceImpl::createCollectionForBulkLoading(
std::unique_ptr<CollectionBulkLoader> loaderToReturn;
Collection* collection;
- auto status = runner->runSynchronousTask([&](OperationContext* txn) -> Status {
+ auto status = runner->runSynchronousTask([&](OperationContext* opCtx) -> Status {
// We are not replicating nor validating writes under this OperationContext*.
// The OperationContext* is used for all writes to the (newly) cloned collection.
- txn->setReplicatedWrites(false);
- documentValidationDisabled(txn) = true;
+ opCtx->setReplicatedWrites(false);
+ documentValidationDisabled(opCtx) = true;
// Retry if WCE.
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
// Get locks and create the collection.
- ScopedTransaction transaction(txn, MODE_IX);
- auto db = stdx::make_unique<AutoGetOrCreateDb>(txn, nss.db(), MODE_IX);
- auto coll = stdx::make_unique<AutoGetCollection>(txn, nss, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ auto db = stdx::make_unique<AutoGetOrCreateDb>(opCtx, nss.db(), MODE_IX);
+ auto coll = stdx::make_unique<AutoGetCollection>(opCtx, nss, MODE_X);
collection = coll->getCollection();
if (collection) {
@@ -272,14 +273,14 @@ StorageInterfaceImpl::createCollectionForBulkLoading(
}
// Create the collection.
- WriteUnitOfWork wunit(txn);
- collection = db->getDb()->createCollection(txn, nss.ns(), options, false);
+ WriteUnitOfWork wunit(opCtx);
+ collection = db->getDb()->createCollection(opCtx, nss.ns(), options, false);
invariant(collection);
wunit.commit();
- coll = stdx::make_unique<AutoGetCollection>(txn, nss, MODE_IX);
+ coll = stdx::make_unique<AutoGetCollection>(opCtx, nss, MODE_IX);
// Move locks into loader, so it now controls their lifetime.
- auto loader = stdx::make_unique<CollectionBulkLoaderImpl>(txn,
+ auto loader = stdx::make_unique<CollectionBulkLoaderImpl>(opCtx,
collection,
idIndexSpec,
std::move(threadPool),
@@ -291,7 +292,7 @@ StorageInterfaceImpl::createCollectionForBulkLoading(
loaderToReturn = std::move(loader);
return Status::OK();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "beginCollectionClone", nss.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "beginCollectionClone", nss.ns());
MONGO_UNREACHABLE;
});
@@ -308,20 +309,20 @@ StorageInterfaceImpl::createCollectionForBulkLoading(
}
-Status StorageInterfaceImpl::insertDocument(OperationContext* txn,
+Status StorageInterfaceImpl::insertDocument(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& doc) {
- return insertDocuments(txn, nss, {doc});
+ return insertDocuments(opCtx, nss, {doc});
}
namespace {
-Status insertDocumentsSingleBatch(OperationContext* txn,
+Status insertDocumentsSingleBatch(OperationContext* opCtx,
const NamespaceString& nss,
std::vector<BSONObj>::const_iterator begin,
std::vector<BSONObj>::const_iterator end) {
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetCollection autoColl(txn, nss, MODE_IX);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ AutoGetCollection autoColl(opCtx, nss, MODE_IX);
auto collection = autoColl.getCollection();
if (!collection) {
return {ErrorCodes::NamespaceNotFound,
@@ -329,9 +330,9 @@ Status insertDocumentsSingleBatch(OperationContext* txn,
<< nss.ns()};
}
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
OpDebug* const nullOpDebug = nullptr;
- auto status = collection->insertDocuments(txn, begin, end, nullOpDebug, false);
+ auto status = collection->insertDocuments(opCtx, begin, end, nullOpDebug, false);
if (!status.isOK()) {
return status;
}
@@ -342,12 +343,12 @@ Status insertDocumentsSingleBatch(OperationContext* txn,
} // namespace
-Status StorageInterfaceImpl::insertDocuments(OperationContext* txn,
+Status StorageInterfaceImpl::insertDocuments(OperationContext* opCtx,
const NamespaceString& nss,
const std::vector<BSONObj>& docs) {
if (docs.size() > 1U) {
try {
- if (insertDocumentsSingleBatch(txn, nss, docs.cbegin(), docs.cend()).isOK()) {
+ if (insertDocumentsSingleBatch(opCtx, nss, docs.cbegin(), docs.cend()).isOK()) {
return Status::OK();
}
} catch (...) {
@@ -359,83 +360,84 @@ Status StorageInterfaceImpl::insertDocuments(OperationContext* txn,
// Try to insert the batch one-at-a-time because the batch failed all-at-once inserting.
for (auto it = docs.cbegin(); it != docs.cend(); ++it) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- auto status = insertDocumentsSingleBatch(txn, nss, it, it + 1);
+ auto status = insertDocumentsSingleBatch(opCtx, nss, it, it + 1);
if (!status.isOK()) {
return status;
}
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "StorageInterfaceImpl::insertDocuments", nss.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
+ opCtx, "StorageInterfaceImpl::insertDocuments", nss.ns());
}
return Status::OK();
}
-Status StorageInterfaceImpl::dropReplicatedDatabases(OperationContext* txn) {
- dropAllDatabasesExceptLocal(txn);
+Status StorageInterfaceImpl::dropReplicatedDatabases(OperationContext* opCtx) {
+ dropAllDatabasesExceptLocal(opCtx);
return Status::OK();
}
-Status StorageInterfaceImpl::createOplog(OperationContext* txn, const NamespaceString& nss) {
- mongo::repl::createOplog(txn, nss.ns(), true);
+Status StorageInterfaceImpl::createOplog(OperationContext* opCtx, const NamespaceString& nss) {
+ mongo::repl::createOplog(opCtx, nss.ns(), true);
return Status::OK();
}
-StatusWith<size_t> StorageInterfaceImpl::getOplogMaxSize(OperationContext* txn,
+StatusWith<size_t> StorageInterfaceImpl::getOplogMaxSize(OperationContext* opCtx,
const NamespaceString& nss) {
- AutoGetCollectionForRead collection(txn, nss);
+ AutoGetCollectionForRead collection(opCtx, nss);
if (!collection.getCollection()) {
return {ErrorCodes::NamespaceNotFound,
str::stream() << "Your oplog doesn't exist: " << nss.ns()};
}
- const auto options = collection.getCollection()->getCatalogEntry()->getCollectionOptions(txn);
+ const auto options = collection.getCollection()->getCatalogEntry()->getCollectionOptions(opCtx);
if (!options.capped)
return {ErrorCodes::BadValue, str::stream() << nss.ns() << " isn't capped"};
return options.cappedSize;
}
-Status StorageInterfaceImpl::createCollection(OperationContext* txn,
+Status StorageInterfaceImpl::createCollection(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionOptions& options) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetOrCreateDb databaseWriteGuard(txn, nss.db(), MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ AutoGetOrCreateDb databaseWriteGuard(opCtx, nss.db(), MODE_X);
auto db = databaseWriteGuard.getDb();
invariant(db);
if (db->getCollection(nss)) {
return {ErrorCodes::NamespaceExists,
str::stream() << "Collection " << nss.ns() << " already exists."};
}
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
try {
- auto coll = db->createCollection(txn, nss.ns(), options);
+ auto coll = db->createCollection(opCtx, nss.ns(), options);
invariant(coll);
} catch (const UserException& ex) {
return ex.toStatus();
}
wuow.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "StorageInterfaceImpl::createCollection", nss.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "StorageInterfaceImpl::createCollection", nss.ns());
return Status::OK();
}
-Status StorageInterfaceImpl::dropCollection(OperationContext* txn, const NamespaceString& nss) {
+Status StorageInterfaceImpl::dropCollection(OperationContext* opCtx, const NamespaceString& nss) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDB(txn, nss.db(), MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ AutoGetDb autoDB(opCtx, nss.db(), MODE_X);
if (!autoDB.getDb()) {
// Database does not exist - nothing to do.
return Status::OK();
}
- WriteUnitOfWork wunit(txn);
- const auto status = autoDB.getDb()->dropCollection(txn, nss.ns());
+ WriteUnitOfWork wunit(opCtx);
+ const auto status = autoDB.getDb()->dropCollection(opCtx, nss.ns());
if (status.isOK()) {
wunit.commit();
}
return status;
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "StorageInterfaceImpl::dropCollection", nss.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "StorageInterfaceImpl::dropCollection", nss.ns());
}
namespace {
@@ -455,7 +457,7 @@ DeleteStageParams makeDeleteStageParamsForDeleteDocuments() {
*/
enum class FindDeleteMode { kFind, kDelete };
StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments(
- OperationContext* txn,
+ OperationContext* opCtx,
const NamespaceString& nss,
boost::optional<StringData> indexName,
StorageInterface::ScanDirection scanDirection,
@@ -468,8 +470,8 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments(
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
auto collectionAccessMode = isFind ? MODE_IS : MODE_IX;
- ScopedTransaction transaction(txn, collectionAccessMode);
- AutoGetCollection collectionGuard(txn, nss, collectionAccessMode);
+ ScopedTransaction transaction(opCtx, collectionAccessMode);
+ AutoGetCollection collectionGuard(opCtx, nss, collectionAccessMode);
auto collection = collectionGuard.getCollection();
if (!collection) {
return {ErrorCodes::NamespaceNotFound,
@@ -493,9 +495,9 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments(
// Use collection scan.
planExecutor = isFind
? InternalPlanner::collectionScan(
- txn, nss.ns(), collection, PlanExecutor::YIELD_MANUAL, direction)
+ opCtx, nss.ns(), collection, PlanExecutor::YIELD_MANUAL, direction)
: InternalPlanner::deleteWithCollectionScan(
- txn,
+ opCtx,
collection,
makeDeleteStageParamsForDeleteDocuments(),
PlanExecutor::YIELD_MANUAL,
@@ -506,7 +508,7 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments(
invariant(indexCatalog);
bool includeUnfinishedIndexes = false;
IndexDescriptor* indexDescriptor =
- indexCatalog->findIndexByName(txn, *indexName, includeUnfinishedIndexes);
+ indexCatalog->findIndexByName(opCtx, *indexName, includeUnfinishedIndexes);
if (!indexDescriptor) {
return {ErrorCodes::IndexNotFound,
str::stream() << "Index not found, ns:" << nss.ns() << ", index: "
@@ -529,7 +531,7 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments(
bounds.first = startKey;
}
planExecutor = isFind
- ? InternalPlanner::indexScan(txn,
+ ? InternalPlanner::indexScan(opCtx,
collection,
indexDescriptor,
bounds.first,
@@ -538,7 +540,7 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments(
PlanExecutor::YIELD_MANUAL,
direction,
InternalPlanner::IXSCAN_FETCH)
- : InternalPlanner::deleteWithIndexScan(txn,
+ : InternalPlanner::deleteWithIndexScan(opCtx,
collection,
makeDeleteStageParamsForDeleteDocuments(),
indexDescriptor,
@@ -562,33 +564,39 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments(
}
return docs;
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, opStr, nss.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, opStr, nss.ns());
MONGO_UNREACHABLE;
}
} // namespace
StatusWith<std::vector<BSONObj>> StorageInterfaceImpl::findDocuments(
- OperationContext* txn,
+ OperationContext* opCtx,
const NamespaceString& nss,
boost::optional<StringData> indexName,
ScanDirection scanDirection,
const BSONObj& startKey,
BoundInclusion boundInclusion,
std::size_t limit) {
- return _findOrDeleteDocuments(
- txn, nss, indexName, scanDirection, startKey, boundInclusion, limit, FindDeleteMode::kFind);
+ return _findOrDeleteDocuments(opCtx,
+ nss,
+ indexName,
+ scanDirection,
+ startKey,
+ boundInclusion,
+ limit,
+ FindDeleteMode::kFind);
}
StatusWith<std::vector<BSONObj>> StorageInterfaceImpl::deleteDocuments(
- OperationContext* txn,
+ OperationContext* opCtx,
const NamespaceString& nss,
boost::optional<StringData> indexName,
ScanDirection scanDirection,
const BSONObj& startKey,
BoundInclusion boundInclusion,
std::size_t limit) {
- return _findOrDeleteDocuments(txn,
+ return _findOrDeleteDocuments(opCtx,
nss,
indexName,
scanDirection,
@@ -598,10 +606,10 @@ StatusWith<std::vector<BSONObj>> StorageInterfaceImpl::deleteDocuments(
FindDeleteMode::kDelete);
}
-Status StorageInterfaceImpl::isAdminDbValid(OperationContext* txn) {
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDB(txn, "admin", MODE_X);
- return checkAdminDatabase(txn, autoDB.getDb());
+Status StorageInterfaceImpl::isAdminDbValid(OperationContext* opCtx) {
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ AutoGetDb autoDB(opCtx, "admin", MODE_X);
+ return checkAdminDatabase(opCtx, autoDB.getDb());
}
} // namespace repl
diff --git a/src/mongo/db/repl/storage_interface_impl.h b/src/mongo/db/repl/storage_interface_impl.h
index 9edd37b44c8..b3bc5eeb617 100644
--- a/src/mongo/db/repl/storage_interface_impl.h
+++ b/src/mongo/db/repl/storage_interface_impl.h
@@ -62,19 +62,19 @@ public:
*/
NamespaceString getMinValidNss() const;
- bool getInitialSyncFlag(OperationContext* txn) const override;
+ bool getInitialSyncFlag(OperationContext* opCtx) const override;
- void setInitialSyncFlag(OperationContext* txn) override;
+ void setInitialSyncFlag(OperationContext* opCtx) override;
- void clearInitialSyncFlag(OperationContext* txn) override;
+ void clearInitialSyncFlag(OperationContext* opCtx) override;
- OpTime getMinValid(OperationContext* txn) const override;
- void setMinValid(OperationContext* txn, const OpTime& minValid) override;
- void setMinValidToAtLeast(OperationContext* txn, const OpTime& endOpTime) override;
- void setOplogDeleteFromPoint(OperationContext* txn, const Timestamp& timestamp) override;
- Timestamp getOplogDeleteFromPoint(OperationContext* txn) override;
- void setAppliedThrough(OperationContext* txn, const OpTime& optime) override;
- OpTime getAppliedThrough(OperationContext* txn) override;
+ OpTime getMinValid(OperationContext* opCtx) const override;
+ void setMinValid(OperationContext* opCtx, const OpTime& minValid) override;
+ void setMinValidToAtLeast(OperationContext* opCtx, const OpTime& endOpTime) override;
+ void setOplogDeleteFromPoint(OperationContext* opCtx, const Timestamp& timestamp) override;
+ Timestamp getOplogDeleteFromPoint(OperationContext* opCtx) override;
+ void setAppliedThrough(OperationContext* opCtx, const OpTime& optime) override;
+ OpTime getAppliedThrough(OperationContext* opCtx) override;
/**
* Allocates a new TaskRunner for use by the passed in collection.
@@ -85,26 +85,27 @@ public:
const BSONObj idIndexSpec,
const std::vector<BSONObj>& secondaryIndexSpecs) override;
- Status insertDocument(OperationContext* txn,
+ Status insertDocument(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& doc) override;
- Status insertDocuments(OperationContext* txn,
+ Status insertDocuments(OperationContext* opCtx,
const NamespaceString& nss,
const std::vector<BSONObj>& docs) override;
- Status dropReplicatedDatabases(OperationContext* txn) override;
+ Status dropReplicatedDatabases(OperationContext* opCtx) override;
- Status createOplog(OperationContext* txn, const NamespaceString& nss) override;
- StatusWith<size_t> getOplogMaxSize(OperationContext* txn, const NamespaceString& nss) override;
+ Status createOplog(OperationContext* opCtx, const NamespaceString& nss) override;
+ StatusWith<size_t> getOplogMaxSize(OperationContext* opCtx,
+ const NamespaceString& nss) override;
- Status createCollection(OperationContext* txn,
+ Status createCollection(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionOptions& options) override;
- Status dropCollection(OperationContext* txn, const NamespaceString& nss) override;
+ Status dropCollection(OperationContext* opCtx, const NamespaceString& nss) override;
- StatusWith<std::vector<BSONObj>> findDocuments(OperationContext* txn,
+ StatusWith<std::vector<BSONObj>> findDocuments(OperationContext* opCtx,
const NamespaceString& nss,
boost::optional<StringData> indexName,
ScanDirection scanDirection,
@@ -112,7 +113,7 @@ public:
BoundInclusion boundInclusion,
std::size_t limit) override;
- StatusWith<std::vector<BSONObj>> deleteDocuments(OperationContext* txn,
+ StatusWith<std::vector<BSONObj>> deleteDocuments(OperationContext* opCtx,
const NamespaceString& nss,
boost::optional<StringData> indexName,
ScanDirection scanDirection,
@@ -120,12 +121,12 @@ public:
BoundInclusion boundInclusion,
std::size_t limit) override;
- Status isAdminDbValid(OperationContext* txn) override;
+ Status isAdminDbValid(OperationContext* opCtx) override;
private:
// Returns empty document if not present.
- BSONObj getMinValidDocument(OperationContext* txn) const;
- void updateMinValidDocument(OperationContext* txn, const BSONObj& updateSpec);
+ BSONObj getMinValidDocument(OperationContext* opCtx) const;
+ void updateMinValidDocument(OperationContext* opCtx, const BSONObj& updateSpec);
const NamespaceString _minValidNss;
};
diff --git a/src/mongo/db/repl/storage_interface_impl_test.cpp b/src/mongo/db/repl/storage_interface_impl_test.cpp
index c44e281f012..2ee2cbf2259 100644
--- a/src/mongo/db/repl/storage_interface_impl_test.cpp
+++ b/src/mongo/db/repl/storage_interface_impl_test.cpp
@@ -87,17 +87,17 @@ NamespaceString makeNamespace(const T& t, const char* suffix = "") {
/**
* Returns min valid document.
*/
-BSONObj getMinValidDocument(OperationContext* txn, const NamespaceString& minValidNss) {
+BSONObj getMinValidDocument(OperationContext* opCtx, const NamespaceString& minValidNss) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IS);
- Lock::DBLock dblk(txn->lockState(), minValidNss.db(), MODE_IS);
- Lock::CollectionLock lk(txn->lockState(), minValidNss.ns(), MODE_IS);
+ ScopedTransaction transaction(opCtx, MODE_IS);
+ Lock::DBLock dblk(opCtx->lockState(), minValidNss.db(), MODE_IS);
+ Lock::CollectionLock lk(opCtx->lockState(), minValidNss.ns(), MODE_IS);
BSONObj mv;
- if (Helpers::getSingleton(txn, minValidNss.ns().c_str(), mv)) {
+ if (Helpers::getSingleton(opCtx, minValidNss.ns().c_str(), mv)) {
return mv;
}
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "getMinValidDocument", minValidNss.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "getMinValidDocument", minValidNss.ns());
return BSONObj();
}
@@ -116,21 +116,21 @@ CollectionOptions createOplogCollectionOptions() {
* Create test collection.
* Returns collection.
*/
-void createCollection(OperationContext* txn,
+void createCollection(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionOptions& options = CollectionOptions()) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dblk(txn->lockState(), nss.db(), MODE_X);
- OldClientContext ctx(txn, nss.ns());
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dblk(opCtx->lockState(), nss.db(), MODE_X);
+ OldClientContext ctx(opCtx, nss.ns());
auto db = ctx.db();
ASSERT_TRUE(db);
- mongo::WriteUnitOfWork wuow(txn);
- auto coll = db->createCollection(txn, nss.ns(), options);
+ mongo::WriteUnitOfWork wuow(opCtx);
+ auto coll = db->createCollection(opCtx, nss.ns(), options);
ASSERT_TRUE(coll);
wuow.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollection", nss.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "createCollection", nss.ns());
}
/**
@@ -158,11 +158,11 @@ ReplSettings createReplSettings() {
/**
* Counts the number of keys in an index using an IndexAccessMethod::validate call.
*/
-int64_t getIndexKeyCount(OperationContext* txn, IndexCatalog* cat, IndexDescriptor* desc) {
+int64_t getIndexKeyCount(OperationContext* opCtx, IndexCatalog* cat, IndexDescriptor* desc) {
auto idx = cat->getIndex(desc);
int64_t numKeys;
ValidateResults fullRes;
- idx->validate(txn, &numKeys, &fullRes);
+ idx->validate(opCtx, &numKeys, &fullRes);
return numKeys;
}
@@ -191,28 +191,28 @@ protected:
ServiceContextMongoDTest::setUp();
createOptCtx();
_coordinator =
- new ReplicationCoordinatorMock(_txn->getServiceContext(), createReplSettings());
+ new ReplicationCoordinatorMock(_opCtx->getServiceContext(), createReplSettings());
setGlobalReplicationCoordinator(_coordinator);
}
void tearDown() override {
- _txn.reset(nullptr);
+ _opCtx.reset(nullptr);
ServiceContextMongoDTest::tearDown();
}
void createOptCtx() {
- _txn = cc().makeOperationContext();
+ _opCtx = cc().makeOperationContext();
// We are not replicating nor validating these writes.
- _txn->setReplicatedWrites(false);
- DisableDocumentValidation validationDisabler(_txn.get());
+ _opCtx->setReplicatedWrites(false);
+ DisableDocumentValidation validationDisabler(_opCtx.get());
}
OperationContext* getOperationContext() {
- return _txn.get();
+ return _opCtx.get();
}
private:
- ServiceContext::UniqueOperationContext _txn;
+ ServiceContext::UniqueOperationContext _opCtx;
// Owned by service context
ReplicationCoordinator* _coordinator;
@@ -249,23 +249,23 @@ TEST_F(StorageInterfaceImplTest, InitialSyncFlag) {
NamespaceString nss("local.StorageInterfaceImplTest_InitialSyncFlag");
StorageInterfaceImpl storageInterface(nss);
- auto txn = getClient()->makeOperationContext();
+ auto opCtx = getClient()->makeOperationContext();
// Initial sync flag should be unset after initializing a new storage engine.
- ASSERT_FALSE(storageInterface.getInitialSyncFlag(txn.get()));
+ ASSERT_FALSE(storageInterface.getInitialSyncFlag(opCtx.get()));
// Setting initial sync flag should affect getInitialSyncFlag() result.
- storageInterface.setInitialSyncFlag(txn.get());
- ASSERT_TRUE(storageInterface.getInitialSyncFlag(txn.get()));
+ storageInterface.setInitialSyncFlag(opCtx.get());
+ ASSERT_TRUE(storageInterface.getInitialSyncFlag(opCtx.get()));
// Check min valid document using storage engine interface.
- auto minValidDocument = getMinValidDocument(txn.get(), nss);
+ auto minValidDocument = getMinValidDocument(opCtx.get(), nss);
ASSERT_TRUE(minValidDocument.hasField(StorageInterfaceImpl::kInitialSyncFlagFieldName));
ASSERT_TRUE(minValidDocument.getBoolField(StorageInterfaceImpl::kInitialSyncFlagFieldName));
// Clearing initial sync flag should affect getInitialSyncFlag() result.
- storageInterface.clearInitialSyncFlag(txn.get());
- ASSERT_FALSE(storageInterface.getInitialSyncFlag(txn.get()));
+ storageInterface.clearInitialSyncFlag(opCtx.get());
+ ASSERT_FALSE(storageInterface.getInitialSyncFlag(opCtx.get()));
}
TEST_F(StorageInterfaceImplTest, GetMinValidAfterSettingInitialSyncFlagWorks) {
@@ -273,53 +273,54 @@ TEST_F(StorageInterfaceImplTest, GetMinValidAfterSettingInitialSyncFlagWorks) {
"local.StorageInterfaceImplTest_GetMinValidAfterSettingInitialSyncFlagWorks");
StorageInterfaceImpl storageInterface(nss);
- auto txn = getClient()->makeOperationContext();
+ auto opCtx = getClient()->makeOperationContext();
// Initial sync flag should be unset after initializing a new storage engine.
- ASSERT_FALSE(storageInterface.getInitialSyncFlag(txn.get()));
+ ASSERT_FALSE(storageInterface.getInitialSyncFlag(opCtx.get()));
// Setting initial sync flag should affect getInitialSyncFlag() result.
- storageInterface.setInitialSyncFlag(txn.get());
- ASSERT_TRUE(storageInterface.getInitialSyncFlag(txn.get()));
+ storageInterface.setInitialSyncFlag(opCtx.get());
+ ASSERT_TRUE(storageInterface.getInitialSyncFlag(opCtx.get()));
- ASSERT(storageInterface.getMinValid(txn.get()).isNull());
- ASSERT(storageInterface.getAppliedThrough(txn.get()).isNull());
- ASSERT(storageInterface.getOplogDeleteFromPoint(txn.get()).isNull());
+ ASSERT(storageInterface.getMinValid(opCtx.get()).isNull());
+ ASSERT(storageInterface.getAppliedThrough(opCtx.get()).isNull());
+ ASSERT(storageInterface.getOplogDeleteFromPoint(opCtx.get()).isNull());
}
TEST_F(StorageInterfaceImplTest, MinValid) {
NamespaceString nss("local.StorageInterfaceImplTest_MinValid");
StorageInterfaceImpl storageInterface(nss);
- auto txn = getClient()->makeOperationContext();
+ auto opCtx = getClient()->makeOperationContext();
// MinValid boundaries should all be null after initializing a new storage engine.
- ASSERT(storageInterface.getMinValid(txn.get()).isNull());
- ASSERT(storageInterface.getAppliedThrough(txn.get()).isNull());
- ASSERT(storageInterface.getOplogDeleteFromPoint(txn.get()).isNull());
+ ASSERT(storageInterface.getMinValid(opCtx.get()).isNull());
+ ASSERT(storageInterface.getAppliedThrough(opCtx.get()).isNull());
+ ASSERT(storageInterface.getOplogDeleteFromPoint(opCtx.get()).isNull());
// Setting min valid boundaries should affect getMinValid() result.
OpTime startOpTime({Seconds(123), 0}, 1LL);
OpTime endOpTime({Seconds(456), 0}, 1LL);
- storageInterface.setAppliedThrough(txn.get(), startOpTime);
- storageInterface.setMinValid(txn.get(), endOpTime);
- storageInterface.setOplogDeleteFromPoint(txn.get(), endOpTime.getTimestamp());
+ storageInterface.setAppliedThrough(opCtx.get(), startOpTime);
+ storageInterface.setMinValid(opCtx.get(), endOpTime);
+ storageInterface.setOplogDeleteFromPoint(opCtx.get(), endOpTime.getTimestamp());
- ASSERT_EQ(storageInterface.getAppliedThrough(txn.get()), startOpTime);
- ASSERT_EQ(storageInterface.getMinValid(txn.get()), endOpTime);
- ASSERT_EQ(storageInterface.getOplogDeleteFromPoint(txn.get()), endOpTime.getTimestamp());
+ ASSERT_EQ(storageInterface.getAppliedThrough(opCtx.get()), startOpTime);
+ ASSERT_EQ(storageInterface.getMinValid(opCtx.get()), endOpTime);
+ ASSERT_EQ(storageInterface.getOplogDeleteFromPoint(opCtx.get()), endOpTime.getTimestamp());
// setMinValid always changes minValid, but setMinValidToAtLeast only does if higher.
- storageInterface.setMinValid(txn.get(), startOpTime); // Forcibly lower it.
- ASSERT_EQ(storageInterface.getMinValid(txn.get()), startOpTime);
- storageInterface.setMinValidToAtLeast(txn.get(), endOpTime); // Higher than current (sets it).
- ASSERT_EQ(storageInterface.getMinValid(txn.get()), endOpTime);
- storageInterface.setMinValidToAtLeast(txn.get(), startOpTime); // Lower than current (no-op).
- ASSERT_EQ(storageInterface.getMinValid(txn.get()), endOpTime);
+ storageInterface.setMinValid(opCtx.get(), startOpTime); // Forcibly lower it.
+ ASSERT_EQ(storageInterface.getMinValid(opCtx.get()), startOpTime);
+ storageInterface.setMinValidToAtLeast(opCtx.get(),
+ endOpTime); // Higher than current (sets it).
+ ASSERT_EQ(storageInterface.getMinValid(opCtx.get()), endOpTime);
+ storageInterface.setMinValidToAtLeast(opCtx.get(), startOpTime); // Lower than current (no-op).
+ ASSERT_EQ(storageInterface.getMinValid(opCtx.get()), endOpTime);
// Check min valid document using storage engine interface.
- auto minValidDocument = getMinValidDocument(txn.get(), nss);
+ auto minValidDocument = getMinValidDocument(opCtx.get(), nss);
ASSERT_TRUE(minValidDocument.hasField(StorageInterfaceImpl::kBeginFieldName));
ASSERT_TRUE(minValidDocument[StorageInterfaceImpl::kBeginFieldName].isABSONObj());
ASSERT_EQUALS(startOpTime,
@@ -330,45 +331,45 @@ TEST_F(StorageInterfaceImplTest, MinValid) {
endOpTime.getTimestamp(),
minValidDocument[StorageInterfaceImpl::kOplogDeleteFromPointFieldName].timestamp());
- // Recovery unit will be owned by "txn".
+ // Recovery unit will be owned by "opCtx".
RecoveryUnitWithDurabilityTracking* recoveryUnit = new RecoveryUnitWithDurabilityTracking();
- txn->setRecoveryUnit(recoveryUnit, OperationContext::kNotInUnitOfWork);
+ opCtx->setRecoveryUnit(recoveryUnit, OperationContext::kNotInUnitOfWork);
// Set min valid without waiting for the changes to be durable.
OpTime endOpTime2({Seconds(789), 0}, 1LL);
- storageInterface.setMinValid(txn.get(), endOpTime2);
- storageInterface.setAppliedThrough(txn.get(), {});
- ASSERT_EQUALS(storageInterface.getAppliedThrough(txn.get()), OpTime());
- ASSERT_EQUALS(storageInterface.getMinValid(txn.get()), endOpTime2);
+ storageInterface.setMinValid(opCtx.get(), endOpTime2);
+ storageInterface.setAppliedThrough(opCtx.get(), {});
+ ASSERT_EQUALS(storageInterface.getAppliedThrough(opCtx.get()), OpTime());
+ ASSERT_EQUALS(storageInterface.getMinValid(opCtx.get()), endOpTime2);
ASSERT_FALSE(recoveryUnit->waitUntilDurableCalled);
}
TEST_F(StorageInterfaceImplTest, SnapshotSupported) {
- auto txn = getClient()->makeOperationContext();
- Status status = txn->recoveryUnit()->setReadFromMajorityCommittedSnapshot();
+ auto opCtx = getClient()->makeOperationContext();
+ Status status = opCtx->recoveryUnit()->setReadFromMajorityCommittedSnapshot();
ASSERT(status.isOK());
}
TEST_F(StorageInterfaceImplTest, InsertDocumentsReturnsOKWhenNoOperationsAreGiven) {
- auto txn = getClient()->makeOperationContext();
+ auto opCtx = getClient()->makeOperationContext();
NamespaceString nss("local." + _agent.getTestName());
- createCollection(txn.get(), nss);
+ createCollection(opCtx.get(), nss);
StorageInterfaceImpl storageInterface(nss);
- ASSERT_OK(storageInterface.insertDocuments(txn.get(), nss, {}));
+ ASSERT_OK(storageInterface.insertDocuments(opCtx.get(), nss, {}));
}
TEST_F(StorageInterfaceImplTest,
InsertDocumentsReturnsInternalErrorWhenSavingOperationToNonOplogCollection) {
// Create fake non-oplog collection to ensure saving oplog entries (without _id field) will
// fail.
- auto txn = getClient()->makeOperationContext();
+ auto opCtx = getClient()->makeOperationContext();
NamespaceString nss("local." + _agent.getSuiteName() + "_" + _agent.getTestName());
- createCollection(txn.get(), nss);
+ createCollection(opCtx.get(), nss);
// Non-oplog collection will enforce mandatory _id field requirement on insertion.
StorageInterfaceImpl storageInterface(nss);
auto op = makeOplogEntry({Timestamp(Seconds(1), 0), 1LL});
- auto status = storageInterface.insertDocuments(txn.get(), nss, {op});
+ auto status = storageInterface.insertDocuments(opCtx.get(), nss, {op});
ASSERT_EQUALS(ErrorCodes::InternalError, status);
ASSERT_STRING_CONTAINS(status.reason(), "Collection::insertDocument got document without _id");
}
@@ -376,12 +377,12 @@ TEST_F(StorageInterfaceImplTest,
TEST_F(StorageInterfaceImplTest,
InsertDocumentsInsertsDocumentsOneAtATimeWhenAllAtOnceInsertingFails) {
// Create a collection that does not support all-at-once inserting.
- auto txn = getClient()->makeOperationContext();
+ auto opCtx = getClient()->makeOperationContext();
NamespaceString nss("local." + _agent.getSuiteName() + "_" + _agent.getTestName());
CollectionOptions options;
options.capped = true;
options.cappedSize = 1024 * 1024;
- createCollection(txn.get(), nss, options);
+ createCollection(opCtx.get(), nss, options);
// StorageInterfaceImpl::insertDocuments should fall back on inserting the batch one at a time.
StorageInterfaceImpl storageInterface(nss);
auto doc1 = BSON("_id" << 1);
@@ -389,16 +390,16 @@ TEST_F(StorageInterfaceImplTest,
std::vector<BSONObj> docs({doc1, doc2});
// Confirm that Collection::insertDocuments fails to insert the batch all at once.
{
- AutoGetCollection autoCollection(txn.get(), nss, MODE_IX);
- WriteUnitOfWork wunit(txn.get());
+ AutoGetCollection autoCollection(opCtx.get(), nss, MODE_IX);
+ WriteUnitOfWork wunit(opCtx.get());
ASSERT_EQUALS(ErrorCodes::OperationCannotBeBatched,
autoCollection.getCollection()->insertDocuments(
- txn.get(), docs.begin(), docs.cend(), nullptr, false));
+ opCtx.get(), docs.begin(), docs.cend(), nullptr, false));
}
- ASSERT_OK(storageInterface.insertDocuments(txn.get(), nss, docs));
+ ASSERT_OK(storageInterface.insertDocuments(opCtx.get(), nss, docs));
// Check collection contents. OplogInterface returns documents in reverse natural order.
- OplogInterfaceLocal oplog(txn.get(), nss.ns());
+ OplogInterfaceLocal oplog(opCtx.get(), nss.ns());
auto iter = oplog.makeIterator();
ASSERT_BSONOBJ_EQ(doc2, unittest::assertGet(iter->next()).first);
ASSERT_BSONOBJ_EQ(doc1, unittest::assertGet(iter->next()).first);
@@ -407,19 +408,19 @@ TEST_F(StorageInterfaceImplTest,
TEST_F(StorageInterfaceImplTest, InsertDocumentsSavesOperationsReturnsOpTimeOfLastOperation) {
// Create fake oplog collection to hold operations.
- auto txn = getClient()->makeOperationContext();
+ auto opCtx = getClient()->makeOperationContext();
NamespaceString nss("local." + _agent.getSuiteName() + "_" + _agent.getTestName());
- createCollection(txn.get(), nss, createOplogCollectionOptions());
+ createCollection(opCtx.get(), nss, createOplogCollectionOptions());
// Insert operations using storage interface. Ensure optime return is consistent with last
// operation inserted.
StorageInterfaceImpl storageInterface(nss);
auto op1 = makeOplogEntry({Timestamp(Seconds(1), 0), 1LL});
auto op2 = makeOplogEntry({Timestamp(Seconds(1), 0), 1LL});
- ASSERT_OK(storageInterface.insertDocuments(txn.get(), nss, {op1, op2}));
+ ASSERT_OK(storageInterface.insertDocuments(opCtx.get(), nss, {op1, op2}));
// Check contents of oplog. OplogInterface iterates over oplog collection in reverse.
- repl::OplogInterfaceLocal oplog(txn.get(), nss.ns());
+ repl::OplogInterfaceLocal oplog(opCtx.get(), nss.ns());
auto iter = oplog.makeIterator();
ASSERT_BSONOBJ_EQ(op2, unittest::assertGet(iter->next()).first);
ASSERT_BSONOBJ_EQ(op1, unittest::assertGet(iter->next()).first);
@@ -431,46 +432,46 @@ TEST_F(StorageInterfaceImplTest,
auto op = makeOplogEntry({Timestamp(Seconds(1), 0), 1LL});
NamespaceString nss("local.nosuchcollection");
StorageInterfaceImpl storageInterface(nss);
- auto txn = getClient()->makeOperationContext();
- auto status = storageInterface.insertDocuments(txn.get(), nss, {op});
+ auto opCtx = getClient()->makeOperationContext();
+ auto status = storageInterface.insertDocuments(opCtx.get(), nss, {op});
ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, status);
ASSERT_STRING_CONTAINS(status.reason(), "The collection must exist before inserting documents");
}
TEST_F(StorageInterfaceImplWithReplCoordTest, InsertMissingDocWorksOnExistingCappedCollection) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
NamespaceString nss("foo.bar");
CollectionOptions opts;
opts.capped = true;
opts.cappedSize = 1024 * 1024;
- createCollection(txn, nss, opts);
- ASSERT_OK(storage.insertDocument(txn, nss, BSON("_id" << 1)));
- AutoGetCollectionForRead autoColl(txn, nss);
+ createCollection(opCtx, nss, opts);
+ ASSERT_OK(storage.insertDocument(opCtx, nss, BSON("_id" << 1)));
+ AutoGetCollectionForRead autoColl(opCtx, nss);
ASSERT_TRUE(autoColl.getCollection());
}
TEST_F(StorageInterfaceImplWithReplCoordTest, InsertMissingDocWorksOnExistingCollection) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
NamespaceString nss("foo.bar");
- createCollection(txn, nss);
- ASSERT_OK(storage.insertDocument(txn, nss, BSON("_id" << 1)));
- AutoGetCollectionForRead autoColl(txn, nss);
+ createCollection(opCtx, nss);
+ ASSERT_OK(storage.insertDocument(opCtx, nss, BSON("_id" << 1)));
+ AutoGetCollectionForRead autoColl(opCtx, nss);
ASSERT_TRUE(autoColl.getCollection());
}
TEST_F(StorageInterfaceImplWithReplCoordTest, InsertMissingDocFailesIfCollectionIsMissing) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
NamespaceString nss("foo.bar");
- const auto status = storage.insertDocument(txn, nss, BSON("_id" << 1));
+ const auto status = storage.insertDocument(opCtx, nss, BSON("_id" << 1));
ASSERT_NOT_OK(status);
ASSERT_EQ(status.code(), ErrorCodes::NamespaceNotFound);
}
TEST_F(StorageInterfaceImplWithReplCoordTest, CreateCollectionWithIDIndexCommits) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
storage.startup();
NamespaceString nss("foo.bar");
@@ -484,18 +485,18 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, CreateCollectionWithIDIndexCommits
ASSERT_OK(loader->insertDocuments(docs.begin(), docs.end()));
ASSERT_OK(loader->commit());
- AutoGetCollectionForRead autoColl(txn, nss);
+ AutoGetCollectionForRead autoColl(opCtx, nss);
auto coll = autoColl.getCollection();
ASSERT(coll);
- ASSERT_EQ(coll->getRecordStore()->numRecords(txn), 2LL);
+ ASSERT_EQ(coll->getRecordStore()->numRecords(opCtx), 2LL);
auto collIdxCat = coll->getIndexCatalog();
- auto idIdxDesc = collIdxCat->findIdIndex(txn);
- auto count = getIndexKeyCount(txn, collIdxCat, idIdxDesc);
+ auto idIdxDesc = collIdxCat->findIdIndex(opCtx);
+ auto count = getIndexKeyCount(opCtx, collIdxCat, idIdxDesc);
ASSERT_EQ(count, 2LL);
}
void _testDestroyUncommitedCollectionBulkLoader(
- OperationContext* txn,
+ OperationContext* opCtx,
std::vector<BSONObj> secondaryIndexes,
stdx::function<void(std::unique_ptr<CollectionBulkLoader> loader)> destroyLoaderFn) {
StorageInterfaceImpl storage;
@@ -513,23 +514,23 @@ void _testDestroyUncommitedCollectionBulkLoader(
// Collection and ID index should not exist after 'loader' is destroyed.
destroyLoaderFn(std::move(loader));
- AutoGetCollectionForRead autoColl(txn, nss);
+ AutoGetCollectionForRead autoColl(opCtx, nss);
auto coll = autoColl.getCollection();
// Bulk loader is used to create indexes. The collection is not dropped when the bulk loader is
// destroyed.
ASSERT_TRUE(coll);
- ASSERT_EQ(1LL, coll->getRecordStore()->numRecords(txn));
+ ASSERT_EQ(1LL, coll->getRecordStore()->numRecords(opCtx));
// IndexCatalog::numIndexesTotal() includes unfinished indexes. We need to ensure that
// the bulk loader drops the unfinished indexes.
auto collIdxCat = coll->getIndexCatalog();
- ASSERT_EQUALS(0, collIdxCat->numIndexesTotal(txn));
+ ASSERT_EQUALS(0, collIdxCat->numIndexesTotal(opCtx));
}
TEST_F(StorageInterfaceImplWithReplCoordTest,
DestroyingUncommittedCollectionBulkLoaderDropsIndexes) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
NamespaceString nss("foo.bar");
std::vector<BSONObj> indexes = {BSON("v" << 1 << "key" << BSON("x" << 1) << "name"
<< "x_1"
@@ -538,24 +539,24 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
auto destroyLoaderFn = [](std::unique_ptr<CollectionBulkLoader> loader) {
// Destroy 'loader' by letting it go out of scope.
};
- _testDestroyUncommitedCollectionBulkLoader(txn, indexes, destroyLoaderFn);
+ _testDestroyUncommitedCollectionBulkLoader(opCtx, indexes, destroyLoaderFn);
}
TEST_F(StorageInterfaceImplWithReplCoordTest,
DestructorInitializesClientBeforeDestroyingIdIndexBuilder) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
NamespaceString nss("foo.bar");
std::vector<BSONObj> indexes;
auto destroyLoaderFn = [](std::unique_ptr<CollectionBulkLoader> loader) {
// Destroy 'loader' in a new thread that does not have a Client.
stdx::thread([&loader]() { loader.reset(); }).join();
};
- _testDestroyUncommitedCollectionBulkLoader(txn, indexes, destroyLoaderFn);
+ _testDestroyUncommitedCollectionBulkLoader(opCtx, indexes, destroyLoaderFn);
}
TEST_F(StorageInterfaceImplWithReplCoordTest,
DestructorInitializesClientBeforeDestroyingSecondaryIndexesBuilder) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
NamespaceString nss("foo.bar");
std::vector<BSONObj> indexes = {BSON("v" << 1 << "key" << BSON("x" << 1) << "name"
<< "x_1"
@@ -565,15 +566,15 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
// Destroy 'loader' in a new thread that does not have a Client.
stdx::thread([&loader]() { loader.reset(); }).join();
};
- _testDestroyUncommitedCollectionBulkLoader(txn, indexes, destroyLoaderFn);
+ _testDestroyUncommitedCollectionBulkLoader(opCtx, indexes, destroyLoaderFn);
}
TEST_F(StorageInterfaceImplWithReplCoordTest, CreateCollectionThatAlreadyExistsFails) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
storage.startup();
NamespaceString nss("test.system.indexes");
- createCollection(txn, nss);
+ createCollection(opCtx, nss);
const CollectionOptions opts;
const std::vector<BSONObj> indexes;
@@ -583,16 +584,16 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, CreateCollectionThatAlreadyExistsF
}
TEST_F(StorageInterfaceImplWithReplCoordTest, CreateOplogCreateCappedCollection) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
NamespaceString nss("local.oplog.X");
{
- AutoGetCollectionForRead autoColl(txn, nss);
+ AutoGetCollectionForRead autoColl(opCtx, nss);
ASSERT_FALSE(autoColl.getCollection());
}
- ASSERT_OK(storage.createOplog(txn, nss));
+ ASSERT_OK(storage.createOplog(opCtx, nss));
{
- AutoGetCollectionForRead autoColl(txn, nss);
+ AutoGetCollectionForRead autoColl(opCtx, nss);
ASSERT_TRUE(autoColl.getCollection());
ASSERT_EQ(nss.toString(), autoColl.getCollection()->ns().toString());
ASSERT_TRUE(autoColl.getCollection()->isCapped());
@@ -601,78 +602,78 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, CreateOplogCreateCappedCollection)
TEST_F(StorageInterfaceImplWithReplCoordTest,
CreateCollectionReturnsUserExceptionAsStatusIfCollectionCreationThrows) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
NamespaceString nss("local.oplog.Y");
{
- AutoGetCollectionForRead autoColl(txn, nss);
+ AutoGetCollectionForRead autoColl(opCtx, nss);
ASSERT_FALSE(autoColl.getCollection());
}
- auto status = storage.createCollection(txn, nss, CollectionOptions());
+ auto status = storage.createCollection(opCtx, nss, CollectionOptions());
ASSERT_EQUALS(ErrorCodes::fromInt(28838), status);
ASSERT_STRING_CONTAINS(status.reason(), "cannot create a non-capped oplog collection");
}
TEST_F(StorageInterfaceImplWithReplCoordTest, CreateCollectionFailsIfCollectionExists) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
{
- AutoGetCollectionForRead autoColl(txn, nss);
+ AutoGetCollectionForRead autoColl(opCtx, nss);
ASSERT_FALSE(autoColl.getCollection());
}
- ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
+ ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions()));
{
- AutoGetCollectionForRead autoColl(txn, nss);
+ AutoGetCollectionForRead autoColl(opCtx, nss);
ASSERT_TRUE(autoColl.getCollection());
ASSERT_EQ(nss.toString(), autoColl.getCollection()->ns().toString());
}
- auto status = storage.createCollection(txn, nss, CollectionOptions());
+ auto status = storage.createCollection(opCtx, nss, CollectionOptions());
ASSERT_EQUALS(ErrorCodes::NamespaceExists, status);
ASSERT_STRING_CONTAINS(status.reason(),
str::stream() << "Collection " << nss.ns() << " already exists");
}
TEST_F(StorageInterfaceImplWithReplCoordTest, DropCollectionWorksWithExistingWithDataCollection) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
NamespaceString nss("foo.bar");
- createCollection(txn, nss);
- ASSERT_OK(storage.insertDocument(txn, nss, BSON("_id" << 1)));
- ASSERT_OK(storage.dropCollection(txn, nss));
+ createCollection(opCtx, nss);
+ ASSERT_OK(storage.insertDocument(opCtx, nss, BSON("_id" << 1)));
+ ASSERT_OK(storage.dropCollection(opCtx, nss));
}
TEST_F(StorageInterfaceImplWithReplCoordTest, DropCollectionWorksWithExistingEmptyCollection) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
NamespaceString nss("foo.bar");
- createCollection(txn, nss);
- ASSERT_OK(storage.dropCollection(txn, nss));
- AutoGetCollectionForRead autoColl(txn, nss);
+ createCollection(opCtx, nss);
+ ASSERT_OK(storage.dropCollection(opCtx, nss));
+ AutoGetCollectionForRead autoColl(opCtx, nss);
ASSERT_FALSE(autoColl.getCollection());
}
TEST_F(StorageInterfaceImplWithReplCoordTest, DropCollectionWorksWithMissingCollection) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
NamespaceString nss("foo.bar");
- ASSERT_FALSE(AutoGetDb(txn, nss.db(), MODE_IS).getDb());
- ASSERT_OK(storage.dropCollection(txn, nss));
- ASSERT_FALSE(AutoGetCollectionForRead(txn, nss).getCollection());
+ ASSERT_FALSE(AutoGetDb(opCtx, nss.db(), MODE_IS).getDb());
+ ASSERT_OK(storage.dropCollection(opCtx, nss));
+ ASSERT_FALSE(AutoGetCollectionForRead(opCtx, nss).getCollection());
// Database should not be created after running dropCollection.
- ASSERT_FALSE(AutoGetDb(txn, nss.db(), MODE_IS).getDb());
+ ASSERT_FALSE(AutoGetDb(opCtx, nss.db(), MODE_IS).getDb());
}
TEST_F(StorageInterfaceImplWithReplCoordTest,
FindDocumentsReturnsInvalidNamespaceIfCollectionIsMissing) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
auto indexName = "_id_"_sd;
ASSERT_EQUALS(ErrorCodes::NamespaceNotFound,
storage
- .findDocuments(txn,
+ .findDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kForward,
@@ -683,14 +684,14 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
}
TEST_F(StorageInterfaceImplWithReplCoordTest, FindDocumentsReturnsIndexNotFoundIfIndexIsMissing) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
auto indexName = "nonexistent"_sd;
- ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
+ ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions()));
ASSERT_EQUALS(ErrorCodes::IndexNotFound,
storage
- .findDocuments(txn,
+ .findDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kForward,
@@ -702,7 +703,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, FindDocumentsReturnsIndexNotFoundI
TEST_F(StorageInterfaceImplWithReplCoordTest,
FindDocumentsReturnsIndexOptionsConflictIfIndexIsAPartialIndex) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
storage.startup();
auto nss = makeNamespace(_agent);
@@ -720,7 +721,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
auto indexName = "x_1"_sd;
ASSERT_EQUALS(ErrorCodes::IndexOptionsConflict,
storage
- .findDocuments(txn,
+ .findDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kForward,
@@ -731,12 +732,12 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
}
TEST_F(StorageInterfaceImplWithReplCoordTest, FindDocumentsReturnsEmptyVectorIfCollectionIsEmpty) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
auto indexName = "_id_"_sd;
- ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
- ASSERT_TRUE(unittest::assertGet(storage.findDocuments(txn,
+ ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions()));
+ ASSERT_TRUE(unittest::assertGet(storage.findDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kForward,
@@ -765,12 +766,12 @@ std::string _toString(const std::vector<BSONObj>& docs) {
/**
* Check collection contents. OplogInterface returns documents in reverse natural order.
*/
-void _assertDocumentsInCollectionEquals(OperationContext* txn,
+void _assertDocumentsInCollectionEquals(OperationContext* opCtx,
const NamespaceString& nss,
const std::vector<BSONObj>& docs) {
std::vector<BSONObj> reversedDocs(docs);
std::reverse(reversedDocs.begin(), reversedDocs.end());
- OplogInterfaceLocal oplog(txn, nss.ns());
+ OplogInterfaceLocal oplog(opCtx, nss.ns());
auto iter = oplog.makeIterator();
for (const auto& doc : reversedDocs) {
ASSERT_BSONOBJ_EQ(doc, unittest::assertGet(iter->next()).first);
@@ -805,12 +806,12 @@ BSONObj _assetGetFront(const StatusWith<std::vector<BSONObj>>& statusWithDocs) {
TEST_F(StorageInterfaceImplWithReplCoordTest,
FindDocumentsReturnsDocumentWithLowestKeyValueIfScanDirectionIsForward) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
auto indexName = "_id_"_sd;
- ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
- ASSERT_OK(storage.insertDocuments(txn,
+ ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions()));
+ ASSERT_OK(storage.insertDocuments(opCtx,
nss,
{BSON("_id" << 0),
BSON("_id" << 1),
@@ -821,7 +822,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
// startKey not provided
ASSERT_BSONOBJ_EQ(
BSON("_id" << 0),
- _assetGetFront(storage.findDocuments(txn,
+ _assetGetFront(storage.findDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kForward,
@@ -830,7 +831,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
1U)));
// startKey not provided. limit is 0.
- _assertDocumentsEqual(storage.findDocuments(txn,
+ _assertDocumentsEqual(storage.findDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kForward,
@@ -840,7 +841,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
{});
// startKey not provided. limit of 2.
- _assertDocumentsEqual(storage.findDocuments(txn,
+ _assertDocumentsEqual(storage.findDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kForward,
@@ -852,7 +853,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
// startKey provided; include start key
ASSERT_BSONOBJ_EQ(
BSON("_id" << 0),
- _assetGetFront(storage.findDocuments(txn,
+ _assetGetFront(storage.findDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kForward,
@@ -861,7 +862,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
1U)));
ASSERT_BSONOBJ_EQ(
BSON("_id" << 1),
- _assetGetFront(storage.findDocuments(txn,
+ _assetGetFront(storage.findDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kForward,
@@ -871,7 +872,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
ASSERT_BSONOBJ_EQ(
BSON("_id" << 1),
- _assetGetFront(storage.findDocuments(txn,
+ _assetGetFront(storage.findDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kForward,
@@ -882,7 +883,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
// startKey provided; include both start and end keys
ASSERT_BSONOBJ_EQ(
BSON("_id" << 1),
- _assetGetFront(storage.findDocuments(txn,
+ _assetGetFront(storage.findDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kForward,
@@ -893,7 +894,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
// startKey provided; exclude start key
ASSERT_BSONOBJ_EQ(
BSON("_id" << 2),
- _assetGetFront(storage.findDocuments(txn,
+ _assetGetFront(storage.findDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kForward,
@@ -903,7 +904,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
ASSERT_BSONOBJ_EQ(
BSON("_id" << 2),
- _assetGetFront(storage.findDocuments(txn,
+ _assetGetFront(storage.findDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kForward,
@@ -914,7 +915,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
// startKey provided; exclude both start and end keys
ASSERT_BSONOBJ_EQ(
BSON("_id" << 2),
- _assetGetFront(storage.findDocuments(txn,
+ _assetGetFront(storage.findDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kForward,
@@ -924,7 +925,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
// startKey provided; exclude both start and end keys.
// A limit of 3 should return 2 documents because we reached the end of the collection.
- _assertDocumentsEqual(storage.findDocuments(txn,
+ _assertDocumentsEqual(storage.findDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kForward,
@@ -934,19 +935,19 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
{BSON("_id" << 3), BSON("_id" << 4)});
_assertDocumentsInCollectionEquals(
- txn,
+ opCtx,
nss,
{BSON("_id" << 0), BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 3), BSON("_id" << 4)});
}
TEST_F(StorageInterfaceImplWithReplCoordTest,
FindDocumentsReturnsDocumentWithHighestKeyValueIfScanDirectionIsBackward) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
auto indexName = "_id_"_sd;
- ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
- ASSERT_OK(storage.insertDocuments(txn,
+ ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions()));
+ ASSERT_OK(storage.insertDocuments(opCtx,
nss,
{BSON("_id" << 0),
BSON("_id" << 1),
@@ -957,7 +958,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
// startKey not provided
ASSERT_BSONOBJ_EQ(
BSON("_id" << 4),
- _assetGetFront(storage.findDocuments(txn,
+ _assetGetFront(storage.findDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kBackward,
@@ -966,7 +967,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
1U)));
// startKey not provided. limit is 0.
- _assertDocumentsEqual(storage.findDocuments(txn,
+ _assertDocumentsEqual(storage.findDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kBackward,
@@ -976,7 +977,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
{});
// startKey not provided. limit of 2.
- _assertDocumentsEqual(storage.findDocuments(txn,
+ _assertDocumentsEqual(storage.findDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kBackward,
@@ -988,7 +989,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
// startKey provided; include start key
ASSERT_BSONOBJ_EQ(
BSON("_id" << 4),
- _assetGetFront(storage.findDocuments(txn,
+ _assetGetFront(storage.findDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kBackward,
@@ -997,7 +998,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
1U)));
ASSERT_BSONOBJ_EQ(
BSON("_id" << 3),
- _assetGetFront(storage.findDocuments(txn,
+ _assetGetFront(storage.findDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kBackward,
@@ -1008,7 +1009,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
// startKey provided; include both start and end keys
ASSERT_BSONOBJ_EQ(
BSON("_id" << 4),
- _assetGetFront(storage.findDocuments(txn,
+ _assetGetFront(storage.findDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kBackward,
@@ -1019,7 +1020,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
// startKey provided; exclude start key
ASSERT_BSONOBJ_EQ(
BSON("_id" << 2),
- _assetGetFront(storage.findDocuments(txn,
+ _assetGetFront(storage.findDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kBackward,
@@ -1030,7 +1031,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
// startKey provided; exclude both start and end keys
ASSERT_BSONOBJ_EQ(
BSON("_id" << 2),
- _assetGetFront(storage.findDocuments(txn,
+ _assetGetFront(storage.findDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kBackward,
@@ -1040,7 +1041,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
// startKey provided; exclude both start and end keys.
// A limit of 3 should return 2 documents because we reached the beginning of the collection.
- _assertDocumentsEqual(storage.findDocuments(txn,
+ _assertDocumentsEqual(storage.findDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kBackward,
@@ -1050,22 +1051,22 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
{BSON("_id" << 1), BSON("_id" << 0)});
_assertDocumentsInCollectionEquals(
- txn,
+ opCtx,
nss,
{BSON("_id" << 0), BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 3), BSON("_id" << 4)});
}
TEST_F(StorageInterfaceImplWithReplCoordTest,
FindDocumentsCollScanReturnsFirstDocumentInsertedIfScanDirectionIsForward) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
- ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
- ASSERT_OK(
- storage.insertDocuments(txn, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)}));
+ ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions()));
+ ASSERT_OK(storage.insertDocuments(
+ opCtx, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)}));
ASSERT_BSONOBJ_EQ(
BSON("_id" << 1),
- _assetGetFront(storage.findDocuments(txn,
+ _assetGetFront(storage.findDocuments(opCtx,
nss,
boost::none,
StorageInterface::ScanDirection::kForward,
@@ -1074,7 +1075,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
1U)));
// Check collection contents. OplogInterface returns documents in reverse natural order.
- OplogInterfaceLocal oplog(txn, nss.ns());
+ OplogInterfaceLocal oplog(opCtx, nss.ns());
auto iter = oplog.makeIterator();
ASSERT_BSONOBJ_EQ(BSON("_id" << 0), unittest::assertGet(iter->next()).first);
ASSERT_BSONOBJ_EQ(BSON("_id" << 2), unittest::assertGet(iter->next()).first);
@@ -1084,15 +1085,15 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
TEST_F(StorageInterfaceImplWithReplCoordTest,
FindDocumentsCollScanReturnsLastDocumentInsertedIfScanDirectionIsBackward) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
- ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
- ASSERT_OK(
- storage.insertDocuments(txn, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)}));
+ ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions()));
+ ASSERT_OK(storage.insertDocuments(
+ opCtx, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)}));
ASSERT_BSONOBJ_EQ(
BSON("_id" << 0),
- _assetGetFront(storage.findDocuments(txn,
+ _assetGetFront(storage.findDocuments(opCtx,
nss,
boost::none,
StorageInterface::ScanDirection::kBackward,
@@ -1101,20 +1102,20 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
1U)));
_assertDocumentsInCollectionEquals(
- txn, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)});
+ opCtx, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)});
}
TEST_F(StorageInterfaceImplWithReplCoordTest,
FindDocumentsCollScanReturnsNoSuchKeyIfStartKeyIsNotEmpty) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
- ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
- ASSERT_OK(
- storage.insertDocuments(txn, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)}));
+ ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions()));
+ ASSERT_OK(storage.insertDocuments(
+ opCtx, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)}));
ASSERT_EQUALS(ErrorCodes::NoSuchKey,
storage
- .findDocuments(txn,
+ .findDocuments(opCtx,
nss,
boost::none,
StorageInterface::ScanDirection::kForward,
@@ -1126,15 +1127,15 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
TEST_F(StorageInterfaceImplWithReplCoordTest,
FindDocumentsCollScanReturnsInvalidOptionsIfBoundIsNotStartKeyOnly) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
- ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
- ASSERT_OK(
- storage.insertDocuments(txn, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)}));
+ ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions()));
+ ASSERT_OK(storage.insertDocuments(
+ opCtx, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)}));
ASSERT_EQUALS(ErrorCodes::InvalidOptions,
storage
- .findDocuments(txn,
+ .findDocuments(opCtx,
nss,
boost::none,
StorageInterface::ScanDirection::kForward,
@@ -1146,13 +1147,13 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
TEST_F(StorageInterfaceImplWithReplCoordTest,
DeleteDocumentsReturnsInvalidNamespaceIfCollectionIsMissing) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
auto indexName = "_id_"_sd;
ASSERT_EQUALS(ErrorCodes::NamespaceNotFound,
storage
- .deleteDocuments(txn,
+ .deleteDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kForward,
@@ -1163,14 +1164,14 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
}
TEST_F(StorageInterfaceImplWithReplCoordTest, DeleteDocumentsReturnsIndexNotFoundIfIndexIsMissing) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
auto indexName = "nonexistent"_sd;
- ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
+ ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions()));
ASSERT_EQUALS(ErrorCodes::IndexNotFound,
storage
- .deleteDocuments(txn,
+ .deleteDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kForward,
@@ -1182,13 +1183,13 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, DeleteDocumentsReturnsIndexNotFoun
TEST_F(StorageInterfaceImplWithReplCoordTest,
DeleteDocumentsReturnsEmptyVectorIfCollectionIsEmpty) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
auto indexName = "_id_"_sd;
- ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
+ ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions()));
ASSERT_TRUE(
- unittest::assertGet(storage.deleteDocuments(txn,
+ unittest::assertGet(storage.deleteDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kForward,
@@ -1200,12 +1201,12 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
TEST_F(StorageInterfaceImplWithReplCoordTest,
DeleteDocumentsReturnsDocumentWithLowestKeyValueIfScanDirectionIsForward) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
auto indexName = "_id_"_sd;
- ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
- ASSERT_OK(storage.insertDocuments(txn,
+ ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions()));
+ ASSERT_OK(storage.insertDocuments(opCtx,
nss,
{BSON("_id" << 0),
BSON("_id" << 1),
@@ -1219,7 +1220,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
// startKey not provided
ASSERT_BSONOBJ_EQ(
BSON("_id" << 0),
- _assetGetFront(storage.deleteDocuments(txn,
+ _assetGetFront(storage.deleteDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kForward,
@@ -1227,7 +1228,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
BoundInclusion::kIncludeStartKeyOnly,
1U)));
- _assertDocumentsInCollectionEquals(txn,
+ _assertDocumentsInCollectionEquals(opCtx,
nss,
{BSON("_id" << 1),
BSON("_id" << 2),
@@ -1238,7 +1239,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
BSON("_id" << 7)});
// startKey not provided. limit is 0.
- _assertDocumentsEqual(storage.deleteDocuments(txn,
+ _assertDocumentsEqual(storage.deleteDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kForward,
@@ -1247,7 +1248,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
0U),
{});
- _assertDocumentsInCollectionEquals(txn,
+ _assertDocumentsInCollectionEquals(opCtx,
nss,
{BSON("_id" << 1),
BSON("_id" << 2),
@@ -1260,7 +1261,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
// startKey provided; include start key
ASSERT_BSONOBJ_EQ(
BSON("_id" << 2),
- _assetGetFront(storage.deleteDocuments(txn,
+ _assetGetFront(storage.deleteDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kForward,
@@ -1268,7 +1269,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
BoundInclusion::kIncludeStartKeyOnly,
1U)));
- _assertDocumentsInCollectionEquals(txn,
+ _assertDocumentsInCollectionEquals(opCtx,
nss,
{BSON("_id" << 1),
BSON("_id" << 3),
@@ -1280,7 +1281,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
// startKey provided; exclude start key
ASSERT_BSONOBJ_EQ(
BSON("_id" << 5),
- _assetGetFront(storage.deleteDocuments(txn,
+ _assetGetFront(storage.deleteDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kForward,
@@ -1289,13 +1290,13 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
1U)));
_assertDocumentsInCollectionEquals(
- txn,
+ opCtx,
nss,
{BSON("_id" << 1), BSON("_id" << 3), BSON("_id" << 4), BSON("_id" << 6), BSON("_id" << 7)});
// startKey provided; exclude start key.
// A limit of 3 should return 2 documents because we reached the end of the collection.
- _assertDocumentsEqual(storage.deleteDocuments(txn,
+ _assertDocumentsEqual(storage.deleteDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kForward,
@@ -1305,17 +1306,17 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
{BSON("_id" << 6), BSON("_id" << 7)});
_assertDocumentsInCollectionEquals(
- txn, nss, {BSON("_id" << 1), BSON("_id" << 3), BSON("_id" << 4)});
+ opCtx, nss, {BSON("_id" << 1), BSON("_id" << 3), BSON("_id" << 4)});
}
TEST_F(StorageInterfaceImplWithReplCoordTest,
DeleteDocumentsReturnsDocumentWithHighestKeyValueIfScanDirectionIsBackward) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
auto indexName = "_id_"_sd;
- ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
- ASSERT_OK(storage.insertDocuments(txn,
+ ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions()));
+ ASSERT_OK(storage.insertDocuments(opCtx,
nss,
{BSON("_id" << 0),
BSON("_id" << 1),
@@ -1329,7 +1330,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
// startKey not provided
ASSERT_BSONOBJ_EQ(
BSON("_id" << 7),
- _assetGetFront(storage.deleteDocuments(txn,
+ _assetGetFront(storage.deleteDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kBackward,
@@ -1337,7 +1338,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
BoundInclusion::kIncludeStartKeyOnly,
1U)));
- _assertDocumentsInCollectionEquals(txn,
+ _assertDocumentsInCollectionEquals(opCtx,
nss,
{BSON("_id" << 0),
BSON("_id" << 1),
@@ -1348,7 +1349,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
BSON("_id" << 6)});
// startKey not provided. limit is 0.
- _assertDocumentsEqual(storage.deleteDocuments(txn,
+ _assertDocumentsEqual(storage.deleteDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kBackward,
@@ -1357,7 +1358,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
0U),
{});
- _assertDocumentsInCollectionEquals(txn,
+ _assertDocumentsInCollectionEquals(opCtx,
nss,
{BSON("_id" << 0),
BSON("_id" << 1),
@@ -1370,7 +1371,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
// startKey provided; include start key
ASSERT_BSONOBJ_EQ(
BSON("_id" << 5),
- _assetGetFront(storage.deleteDocuments(txn,
+ _assetGetFront(storage.deleteDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kBackward,
@@ -1378,7 +1379,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
BoundInclusion::kIncludeStartKeyOnly,
1U)));
- _assertDocumentsInCollectionEquals(txn,
+ _assertDocumentsInCollectionEquals(opCtx,
nss,
{BSON("_id" << 0),
BSON("_id" << 1),
@@ -1390,7 +1391,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
// startKey provided; exclude start key
ASSERT_BSONOBJ_EQ(
BSON("_id" << 2),
- _assetGetFront(storage.deleteDocuments(txn,
+ _assetGetFront(storage.deleteDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kBackward,
@@ -1399,13 +1400,13 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
1U)));
_assertDocumentsInCollectionEquals(
- txn,
+ opCtx,
nss,
{BSON("_id" << 0), BSON("_id" << 1), BSON("_id" << 3), BSON("_id" << 4), BSON("_id" << 6)});
// startKey provided; exclude start key.
// A limit of 3 should return 2 documents because we reached the beginning of the collection.
- _assertDocumentsEqual(storage.deleteDocuments(txn,
+ _assertDocumentsEqual(storage.deleteDocuments(opCtx,
nss,
indexName,
StorageInterface::ScanDirection::kBackward,
@@ -1415,20 +1416,20 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
{BSON("_id" << 1), BSON("_id" << 0)});
_assertDocumentsInCollectionEquals(
- txn, nss, {BSON("_id" << 3), BSON("_id" << 4), BSON("_id" << 6)});
+ opCtx, nss, {BSON("_id" << 3), BSON("_id" << 4), BSON("_id" << 6)});
}
TEST_F(StorageInterfaceImplWithReplCoordTest,
DeleteDocumentsCollScanReturnsFirstDocumentInsertedIfScanDirectionIsForward) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
- ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
- ASSERT_OK(
- storage.insertDocuments(txn, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)}));
+ ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions()));
+ ASSERT_OK(storage.insertDocuments(
+ opCtx, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)}));
ASSERT_BSONOBJ_EQ(
BSON("_id" << 1),
- _assetGetFront(storage.deleteDocuments(txn,
+ _assetGetFront(storage.deleteDocuments(opCtx,
nss,
boost::none,
StorageInterface::ScanDirection::kForward,
@@ -1436,20 +1437,20 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
BoundInclusion::kIncludeStartKeyOnly,
1U)));
- _assertDocumentsInCollectionEquals(txn, nss, {BSON("_id" << 2), BSON("_id" << 0)});
+ _assertDocumentsInCollectionEquals(opCtx, nss, {BSON("_id" << 2), BSON("_id" << 0)});
}
TEST_F(StorageInterfaceImplWithReplCoordTest,
DeleteDocumentsCollScanReturnsLastDocumentInsertedIfScanDirectionIsBackward) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
- ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
- ASSERT_OK(
- storage.insertDocuments(txn, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)}));
+ ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions()));
+ ASSERT_OK(storage.insertDocuments(
+ opCtx, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)}));
ASSERT_BSONOBJ_EQ(
BSON("_id" << 0),
- _assetGetFront(storage.deleteDocuments(txn,
+ _assetGetFront(storage.deleteDocuments(opCtx,
nss,
boost::none,
StorageInterface::ScanDirection::kBackward,
@@ -1457,20 +1458,20 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
BoundInclusion::kIncludeStartKeyOnly,
1U)));
- _assertDocumentsInCollectionEquals(txn, nss, {BSON("_id" << 1), BSON("_id" << 2)});
+ _assertDocumentsInCollectionEquals(opCtx, nss, {BSON("_id" << 1), BSON("_id" << 2)});
}
TEST_F(StorageInterfaceImplWithReplCoordTest,
DeleteDocumentsCollScanReturnsNoSuchKeyIfStartKeyIsNotEmpty) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
- ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
- ASSERT_OK(
- storage.insertDocuments(txn, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)}));
+ ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions()));
+ ASSERT_OK(storage.insertDocuments(
+ opCtx, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)}));
ASSERT_EQUALS(ErrorCodes::NoSuchKey,
storage
- .deleteDocuments(txn,
+ .deleteDocuments(opCtx,
nss,
boost::none,
StorageInterface::ScanDirection::kForward,
@@ -1482,15 +1483,15 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
TEST_F(StorageInterfaceImplWithReplCoordTest,
DeleteDocumentsCollScanReturnsInvalidOptionsIfBoundIsNotStartKeyOnly) {
- auto txn = getOperationContext();
+ auto opCtx = getOperationContext();
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
- ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
- ASSERT_OK(
- storage.insertDocuments(txn, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)}));
+ ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions()));
+ ASSERT_OK(storage.insertDocuments(
+ opCtx, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)}));
ASSERT_EQUALS(ErrorCodes::InvalidOptions,
storage
- .deleteDocuments(txn,
+ .deleteDocuments(opCtx,
nss,
boost::none,
StorageInterface::ScanDirection::kForward,
diff --git a/src/mongo/db/repl/storage_interface_mock.cpp b/src/mongo/db/repl/storage_interface_mock.cpp
index ec870c1a0cc..4b534c05d6a 100644
--- a/src/mongo/db/repl/storage_interface_mock.cpp
+++ b/src/mongo/db/repl/storage_interface_mock.cpp
@@ -40,53 +40,53 @@ namespace mongo {
namespace repl {
void StorageInterfaceMock::startup() {}
void StorageInterfaceMock::shutdown() {}
-bool StorageInterfaceMock::getInitialSyncFlag(OperationContext* txn) const {
+bool StorageInterfaceMock::getInitialSyncFlag(OperationContext* opCtx) const {
stdx::lock_guard<stdx::mutex> lock(_initialSyncFlagMutex);
return _initialSyncFlag;
}
-void StorageInterfaceMock::setInitialSyncFlag(OperationContext* txn) {
+void StorageInterfaceMock::setInitialSyncFlag(OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> lock(_initialSyncFlagMutex);
_initialSyncFlag = true;
}
-void StorageInterfaceMock::clearInitialSyncFlag(OperationContext* txn) {
+void StorageInterfaceMock::clearInitialSyncFlag(OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> lock(_initialSyncFlagMutex);
_initialSyncFlag = false;
}
-OpTime StorageInterfaceMock::getMinValid(OperationContext* txn) const {
+OpTime StorageInterfaceMock::getMinValid(OperationContext* opCtx) const {
stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
return _minValid;
}
-void StorageInterfaceMock::setMinValid(OperationContext* txn, const OpTime& minValid) {
+void StorageInterfaceMock::setMinValid(OperationContext* opCtx, const OpTime& minValid) {
stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
_minValid = minValid;
}
-void StorageInterfaceMock::setMinValidToAtLeast(OperationContext* txn, const OpTime& minValid) {
+void StorageInterfaceMock::setMinValidToAtLeast(OperationContext* opCtx, const OpTime& minValid) {
stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
_minValid = std::max(_minValid, minValid);
}
-void StorageInterfaceMock::setOplogDeleteFromPoint(OperationContext* txn,
+void StorageInterfaceMock::setOplogDeleteFromPoint(OperationContext* opCtx,
const Timestamp& timestamp) {
stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
_oplogDeleteFromPoint = timestamp;
}
-Timestamp StorageInterfaceMock::getOplogDeleteFromPoint(OperationContext* txn) {
+Timestamp StorageInterfaceMock::getOplogDeleteFromPoint(OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
return _oplogDeleteFromPoint;
}
-void StorageInterfaceMock::setAppliedThrough(OperationContext* txn, const OpTime& optime) {
+void StorageInterfaceMock::setAppliedThrough(OperationContext* opCtx, const OpTime& optime) {
stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
_appliedThrough = optime;
}
-OpTime StorageInterfaceMock::getAppliedThrough(OperationContext* txn) {
+OpTime StorageInterfaceMock::getAppliedThrough(OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
return _appliedThrough;
}
diff --git a/src/mongo/db/repl/storage_interface_mock.h b/src/mongo/db/repl/storage_interface_mock.h
index 911244c2670..9f63764473b 100644
--- a/src/mongo/db/repl/storage_interface_mock.h
+++ b/src/mongo/db/repl/storage_interface_mock.h
@@ -92,17 +92,18 @@ public:
const BSONObj idIndexSpec,
const std::vector<BSONObj>& secondaryIndexSpecs)>;
using InsertDocumentFn = stdx::function<Status(
- OperationContext* txn, const NamespaceString& nss, const BSONObj& doc)>;
+ OperationContext* opCtx, const NamespaceString& nss, const BSONObj& doc)>;
using InsertDocumentsFn = stdx::function<Status(
- OperationContext* txn, const NamespaceString& nss, const std::vector<BSONObj>& docs)>;
- using DropUserDatabasesFn = stdx::function<Status(OperationContext* txn)>;
- using CreateOplogFn = stdx::function<Status(OperationContext* txn, const NamespaceString& nss)>;
+ OperationContext* opCtx, const NamespaceString& nss, const std::vector<BSONObj>& docs)>;
+ using DropUserDatabasesFn = stdx::function<Status(OperationContext* opCtx)>;
+ using CreateOplogFn =
+ stdx::function<Status(OperationContext* opCtx, const NamespaceString& nss)>;
using CreateCollectionFn = stdx::function<Status(
- OperationContext* txn, const NamespaceString& nss, const CollectionOptions& options)>;
+ OperationContext* opCtx, const NamespaceString& nss, const CollectionOptions& options)>;
using DropCollectionFn =
- stdx::function<Status(OperationContext* txn, const NamespaceString& nss)>;
+ stdx::function<Status(OperationContext* opCtx, const NamespaceString& nss)>;
using FindDocumentsFn =
- stdx::function<StatusWith<std::vector<BSONObj>>(OperationContext* txn,
+ stdx::function<StatusWith<std::vector<BSONObj>>(OperationContext* opCtx,
const NamespaceString& nss,
boost::optional<StringData> indexName,
ScanDirection scanDirection,
@@ -110,31 +111,31 @@ public:
BoundInclusion boundInclusion,
std::size_t limit)>;
using DeleteDocumentsFn =
- stdx::function<StatusWith<std::vector<BSONObj>>(OperationContext* txn,
+ stdx::function<StatusWith<std::vector<BSONObj>>(OperationContext* opCtx,
const NamespaceString& nss,
boost::optional<StringData> indexName,
ScanDirection scanDirection,
const BSONObj& startKey,
BoundInclusion boundInclusion,
std::size_t limit)>;
- using IsAdminDbValidFn = stdx::function<Status(OperationContext* txn)>;
+ using IsAdminDbValidFn = stdx::function<Status(OperationContext* opCtx)>;
StorageInterfaceMock() = default;
void startup() override;
void shutdown() override;
- bool getInitialSyncFlag(OperationContext* txn) const override;
- void setInitialSyncFlag(OperationContext* txn) override;
- void clearInitialSyncFlag(OperationContext* txn) override;
+ bool getInitialSyncFlag(OperationContext* opCtx) const override;
+ void setInitialSyncFlag(OperationContext* opCtx) override;
+ void clearInitialSyncFlag(OperationContext* opCtx) override;
- OpTime getMinValid(OperationContext* txn) const override;
- void setMinValid(OperationContext* txn, const OpTime& minValid) override;
- void setMinValidToAtLeast(OperationContext* txn, const OpTime& minValid) override;
- void setOplogDeleteFromPoint(OperationContext* txn, const Timestamp& timestamp) override;
- Timestamp getOplogDeleteFromPoint(OperationContext* txn) override;
- void setAppliedThrough(OperationContext* txn, const OpTime& optime) override;
- OpTime getAppliedThrough(OperationContext* txn) override;
+ OpTime getMinValid(OperationContext* opCtx) const override;
+ void setMinValid(OperationContext* opCtx, const OpTime& minValid) override;
+ void setMinValidToAtLeast(OperationContext* opCtx, const OpTime& minValid) override;
+ void setOplogDeleteFromPoint(OperationContext* opCtx, const Timestamp& timestamp) override;
+ Timestamp getOplogDeleteFromPoint(OperationContext* opCtx) override;
+ void setAppliedThrough(OperationContext* opCtx, const OpTime& optime) override;
+ OpTime getAppliedThrough(OperationContext* opCtx) override;
StatusWith<std::unique_ptr<CollectionBulkLoader>> createCollectionForBulkLoading(
const NamespaceString& nss,
@@ -144,51 +145,53 @@ public:
return createCollectionForBulkFn(nss, options, idIndexSpec, secondaryIndexSpecs);
};
- Status insertDocument(OperationContext* txn,
+ Status insertDocument(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& doc) override {
- return insertDocumentFn(txn, nss, doc);
+ return insertDocumentFn(opCtx, nss, doc);
};
- Status insertDocuments(OperationContext* txn,
+ Status insertDocuments(OperationContext* opCtx,
const NamespaceString& nss,
const std::vector<BSONObj>& docs) override {
- return insertDocumentsFn(txn, nss, docs);
+ return insertDocumentsFn(opCtx, nss, docs);
}
- Status dropReplicatedDatabases(OperationContext* txn) override {
- return dropUserDBsFn(txn);
+ Status dropReplicatedDatabases(OperationContext* opCtx) override {
+ return dropUserDBsFn(opCtx);
};
- Status createOplog(OperationContext* txn, const NamespaceString& nss) override {
- return createOplogFn(txn, nss);
+ Status createOplog(OperationContext* opCtx, const NamespaceString& nss) override {
+ return createOplogFn(opCtx, nss);
};
- StatusWith<size_t> getOplogMaxSize(OperationContext* txn, const NamespaceString& nss) override {
+ StatusWith<size_t> getOplogMaxSize(OperationContext* opCtx,
+ const NamespaceString& nss) override {
return 1024 * 1024 * 1024;
}
- Status createCollection(OperationContext* txn,
+ Status createCollection(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionOptions& options) override {
- return createCollFn(txn, nss, options);
+ return createCollFn(opCtx, nss, options);
}
- Status dropCollection(OperationContext* txn, const NamespaceString& nss) override {
- return dropCollFn(txn, nss);
+ Status dropCollection(OperationContext* opCtx, const NamespaceString& nss) override {
+ return dropCollFn(opCtx, nss);
};
- StatusWith<std::vector<BSONObj>> findDocuments(OperationContext* txn,
+ StatusWith<std::vector<BSONObj>> findDocuments(OperationContext* opCtx,
const NamespaceString& nss,
boost::optional<StringData> indexName,
ScanDirection scanDirection,
const BSONObj& startKey,
BoundInclusion boundInclusion,
std::size_t limit) override {
- return findDocumentsFn(txn, nss, indexName, scanDirection, startKey, boundInclusion, limit);
+ return findDocumentsFn(
+ opCtx, nss, indexName, scanDirection, startKey, boundInclusion, limit);
}
- StatusWith<std::vector<BSONObj>> deleteDocuments(OperationContext* txn,
+ StatusWith<std::vector<BSONObj>> deleteDocuments(OperationContext* opCtx,
const NamespaceString& nss,
boost::optional<StringData> indexName,
ScanDirection scanDirection,
@@ -196,11 +199,11 @@ public:
BoundInclusion boundInclusion,
std::size_t limit) override {
return deleteDocumentsFn(
- txn, nss, indexName, scanDirection, startKey, boundInclusion, limit);
+ opCtx, nss, indexName, scanDirection, startKey, boundInclusion, limit);
}
- Status isAdminDbValid(OperationContext* txn) override {
- return isAdminDbValidFn(txn);
+ Status isAdminDbValid(OperationContext* opCtx) override {
+ return isAdminDbValidFn(opCtx);
};
@@ -214,27 +217,27 @@ public:
return Status{ErrorCodes::IllegalOperation, "CreateCollectionForBulkFn not implemented."};
};
InsertDocumentFn insertDocumentFn =
- [](OperationContext* txn, const NamespaceString& nss, const BSONObj& doc) {
+ [](OperationContext* opCtx, const NamespaceString& nss, const BSONObj& doc) {
return Status{ErrorCodes::IllegalOperation, "InsertDocumentFn not implemented."};
};
InsertDocumentsFn insertDocumentsFn =
- [](OperationContext* txn, const NamespaceString& nss, const std::vector<BSONObj>& docs) {
+ [](OperationContext* opCtx, const NamespaceString& nss, const std::vector<BSONObj>& docs) {
return Status{ErrorCodes::IllegalOperation, "InsertDocumentsFn not implemented."};
};
- DropUserDatabasesFn dropUserDBsFn = [](OperationContext* txn) {
+ DropUserDatabasesFn dropUserDBsFn = [](OperationContext* opCtx) {
return Status{ErrorCodes::IllegalOperation, "DropUserDatabasesFn not implemented."};
};
- CreateOplogFn createOplogFn = [](OperationContext* txn, const NamespaceString& nss) {
+ CreateOplogFn createOplogFn = [](OperationContext* opCtx, const NamespaceString& nss) {
return Status{ErrorCodes::IllegalOperation, "CreateOplogFn not implemented."};
};
CreateCollectionFn createCollFn =
- [](OperationContext* txn, const NamespaceString& nss, const CollectionOptions& options) {
+ [](OperationContext* opCtx, const NamespaceString& nss, const CollectionOptions& options) {
return Status{ErrorCodes::IllegalOperation, "CreateCollectionFn not implemented."};
};
- DropCollectionFn dropCollFn = [](OperationContext* txn, const NamespaceString& nss) {
+ DropCollectionFn dropCollFn = [](OperationContext* opCtx, const NamespaceString& nss) {
return Status{ErrorCodes::IllegalOperation, "DropCollectionFn not implemented."};
};
- FindDocumentsFn findDocumentsFn = [](OperationContext* txn,
+ FindDocumentsFn findDocumentsFn = [](OperationContext* opCtx,
const NamespaceString& nss,
boost::optional<StringData> indexName,
ScanDirection scanDirection,
@@ -243,7 +246,7 @@ public:
std::size_t limit) {
return Status{ErrorCodes::IllegalOperation, "FindOneFn not implemented."};
};
- DeleteDocumentsFn deleteDocumentsFn = [](OperationContext* txn,
+ DeleteDocumentsFn deleteDocumentsFn = [](OperationContext* opCtx,
const NamespaceString& nss,
boost::optional<StringData> indexName,
ScanDirection scanDirection,
diff --git a/src/mongo/db/repl/sync_source_feedback.cpp b/src/mongo/db/repl/sync_source_feedback.cpp
index 2633e058e44..b8c41b1e1fd 100644
--- a/src/mongo/db/repl/sync_source_feedback.cpp
+++ b/src/mongo/db/repl/sync_source_feedback.cpp
@@ -52,9 +52,9 @@ namespace {
* Calculates the keep alive interval based on the current configuration in the replication
* coordinator.
*/
-Milliseconds calculateKeepAliveInterval(OperationContext* txn, stdx::mutex& mtx) {
+Milliseconds calculateKeepAliveInterval(OperationContext* opCtx, stdx::mutex& mtx) {
stdx::lock_guard<stdx::mutex> lock(mtx);
- auto replCoord = repl::ReplicationCoordinator::get(txn);
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
auto rsConfig = replCoord->getConfig();
auto keepAliveInterval = rsConfig.getElectionTimeoutPeriod() / 2;
return keepAliveInterval;
@@ -64,9 +64,9 @@ Milliseconds calculateKeepAliveInterval(OperationContext* txn, stdx::mutex& mtx)
* Returns function to prepare update command
*/
Reporter::PrepareReplSetUpdatePositionCommandFn makePrepareReplSetUpdatePositionCommandFn(
- OperationContext* txn, const HostAndPort& syncTarget, BackgroundSync* bgsync) {
- return [syncTarget, txn, bgsync](ReplicationCoordinator::ReplSetUpdatePositionCommandStyle
- commandStyle) -> StatusWith<BSONObj> {
+ OperationContext* opCtx, const HostAndPort& syncTarget, BackgroundSync* bgsync) {
+ return [syncTarget, opCtx, bgsync](ReplicationCoordinator::ReplSetUpdatePositionCommandStyle
+ commandStyle) -> StatusWith<BSONObj> {
auto currentSyncTarget = bgsync->getSyncTarget();
if (currentSyncTarget != syncTarget) {
if (currentSyncTarget.empty()) {
@@ -82,7 +82,7 @@ Reporter::PrepareReplSetUpdatePositionCommandFn makePrepareReplSetUpdatePosition
}
}
- auto replCoord = repl::ReplicationCoordinator::get(txn);
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
if (replCoord->getMemberState().primary()) {
// Primary has no one to send updates to.
return Status(ErrorCodes::InvalidSyncSource,
@@ -149,10 +149,10 @@ void SyncSourceFeedback::run(executor::TaskExecutor* executor, BackgroundSync* b
Milliseconds keepAliveInterval(0);
while (true) { // breaks once _shutdownSignaled is true
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
if (keepAliveInterval == Milliseconds(0)) {
- keepAliveInterval = calculateKeepAliveInterval(txn.get(), _mtx);
+ keepAliveInterval = calculateKeepAliveInterval(opCtx.get(), _mtx);
}
{
@@ -163,7 +163,7 @@ void SyncSourceFeedback::run(executor::TaskExecutor* executor, BackgroundSync* b
while (!_positionChanged && !_shutdownSignaled) {
if (_cond.wait_for(lock, keepAliveInterval.toSystemDuration()) ==
stdx::cv_status::timeout) {
- MemberState state = ReplicationCoordinator::get(txn.get())->getMemberState();
+ MemberState state = ReplicationCoordinator::get(opCtx.get())->getMemberState();
if (!(state.primary() || state.startup())) {
break;
}
@@ -179,7 +179,7 @@ void SyncSourceFeedback::run(executor::TaskExecutor* executor, BackgroundSync* b
{
stdx::lock_guard<stdx::mutex> lock(_mtx);
- MemberState state = ReplicationCoordinator::get(txn.get())->getMemberState();
+ MemberState state = ReplicationCoordinator::get(opCtx.get())->getMemberState();
if (state.primary() || state.startup()) {
continue;
}
@@ -201,17 +201,18 @@ void SyncSourceFeedback::run(executor::TaskExecutor* executor, BackgroundSync* b
// Update keepalive value from config.
auto oldKeepAliveInterval = keepAliveInterval;
- keepAliveInterval = calculateKeepAliveInterval(txn.get(), _mtx);
+ keepAliveInterval = calculateKeepAliveInterval(opCtx.get(), _mtx);
if (oldKeepAliveInterval != keepAliveInterval) {
LOG(1) << "new syncSourceFeedback keep alive duration = " << keepAliveInterval
<< " (previously " << oldKeepAliveInterval << ")";
}
}
- Reporter reporter(executor,
- makePrepareReplSetUpdatePositionCommandFn(txn.get(), syncTarget, bgsync),
- syncTarget,
- keepAliveInterval);
+ Reporter reporter(
+ executor,
+ makePrepareReplSetUpdatePositionCommandFn(opCtx.get(), syncTarget, bgsync),
+ syncTarget,
+ keepAliveInterval);
{
stdx::lock_guard<stdx::mutex> lock(_mtx);
if (_shutdownSignaled) {
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 277df2f9a9d..8738b47d027 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -255,8 +255,8 @@ void ApplyBatchFinalizerForJournal::_run() {
_latestOpTime = OpTime();
}
- auto txn = cc().makeOperationContext();
- txn->recoveryUnit()->waitUntilDurable();
+ auto opCtx = cc().makeOperationContext();
+ opCtx->recoveryUnit()->waitUntilDurable();
_recordDurable(latestOpTime);
}
}
@@ -276,19 +276,19 @@ std::unique_ptr<OldThreadPool> SyncTail::makeWriterPool() {
return stdx::make_unique<OldThreadPool>(replWriterThreadCount, "repl writer worker ");
}
-bool SyncTail::peek(OperationContext* txn, BSONObj* op) {
- return _networkQueue->peek(txn, op);
+bool SyncTail::peek(OperationContext* opCtx, BSONObj* op) {
+ return _networkQueue->peek(opCtx, op);
}
// static
-Status SyncTail::syncApply(OperationContext* txn,
+Status SyncTail::syncApply(OperationContext* opCtx,
const BSONObj& op,
bool inSteadyStateReplication,
ApplyOperationInLockFn applyOperationInLock,
ApplyCommandInLockFn applyCommandInLock,
IncrementOpsAppliedStatsFn incrementOpsAppliedStats) {
// Count each log op application as a separate operation, for reporting purposes
- CurOp individualOp(txn);
+ CurOp individualOp(opCtx);
const char* ns = op.getStringField("ns");
verify(ns);
@@ -312,24 +312,24 @@ Status SyncTail::syncApply(OperationContext* txn,
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
// a command may need a global write lock. so we will conservatively go
// ahead and grab one here. suboptimal. :-(
- Lock::GlobalWrite globalWriteLock(txn->lockState());
+ Lock::GlobalWrite globalWriteLock(opCtx->lockState());
// special case apply for commands to avoid implicit database creation
- Status status = applyCommandInLock(txn, op, inSteadyStateReplication);
+ Status status = applyCommandInLock(opCtx, op, inSteadyStateReplication);
incrementOpsAppliedStats();
return status;
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "syncApply_command", ns);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "syncApply_command", ns);
}
auto applyOp = [&](Database* db) {
// For non-initial-sync, we convert updates to upserts
// to suppress errors when replaying oplog entries.
- txn->setReplicatedWrites(false);
- DisableDocumentValidation validationDisabler(txn);
+ opCtx->setReplicatedWrites(false);
+ DisableDocumentValidation validationDisabler(opCtx);
Status status =
- applyOperationInLock(txn, db, op, inSteadyStateReplication, incrementOpsAppliedStats);
+ applyOperationInLock(opCtx, db, op, inSteadyStateReplication, incrementOpsAppliedStats);
if (!status.isOK() && status.code() == ErrorCodes::WriteConflict) {
throw WriteConflictException();
}
@@ -339,11 +339,11 @@ Status SyncTail::syncApply(OperationContext* txn,
if (isNoOp || (opType[0] == 'i' && nsToCollectionSubstring(ns) == "system.indexes")) {
auto opStr = isNoOp ? "syncApply_noop" : "syncApply_indexBuild";
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- Lock::DBLock dbLock(txn->lockState(), nsToDatabaseSubstring(ns), MODE_X);
- OldClientContext ctx(txn, ns);
+ Lock::DBLock dbLock(opCtx->lockState(), nsToDatabaseSubstring(ns), MODE_X);
+ OldClientContext ctx(opCtx, ns);
return applyOp(ctx.db());
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, opStr, ns);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, opStr, ns);
}
if (isCrudOpType(opType)) {
@@ -361,29 +361,29 @@ Status SyncTail::syncApply(OperationContext* txn,
// drop the DB lock before acquiring
// the upgraded one.
dbLock.reset();
- dbLock.reset(new Lock::DBLock(txn->lockState(), dbName, mode));
- collectionLock.reset(new Lock::CollectionLock(txn->lockState(), ns, mode));
+ dbLock.reset(new Lock::DBLock(opCtx->lockState(), dbName, mode));
+ collectionLock.reset(new Lock::CollectionLock(opCtx->lockState(), ns, mode));
};
resetLocks(MODE_IX);
- if (!dbHolder().get(txn, dbName)) {
+ if (!dbHolder().get(opCtx, dbName)) {
// Need to create database, so reset lock to stronger mode.
resetLocks(MODE_X);
- ctx.reset(new OldClientContext(txn, ns));
+ ctx.reset(new OldClientContext(opCtx, ns));
} else {
- ctx.reset(new OldClientContext(txn, ns));
+ ctx.reset(new OldClientContext(opCtx, ns));
if (!ctx->db()->getCollection(ns)) {
// Need to implicitly create collection. This occurs for 'u' opTypes,
// but not for 'i' nor 'd'.
ctx.reset();
resetLocks(MODE_X);
- ctx.reset(new OldClientContext(txn, ns));
+ ctx.reset(new OldClientContext(opCtx, ns));
}
}
return applyOp(ctx->db());
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "syncApply_CRUD", ns);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "syncApply_CRUD", ns);
}
// unknown opType
@@ -393,10 +393,10 @@ Status SyncTail::syncApply(OperationContext* txn,
return Status(ErrorCodes::BadValue, ss);
}
-Status SyncTail::syncApply(OperationContext* txn,
+Status SyncTail::syncApply(OperationContext* opCtx,
const BSONObj& op,
bool inSteadyStateReplication) {
- return SyncTail::syncApply(txn,
+ return SyncTail::syncApply(opCtx,
op,
inSteadyStateReplication,
applyOperation_inlock,
@@ -416,12 +416,12 @@ void prefetchOp(const BSONObj& op) {
try {
// one possible tweak here would be to stay in the read lock for this database
// for multiple prefetches if they are for the same database.
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- AutoGetCollectionForRead ctx(&txn, NamespaceString(ns));
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ AutoGetCollectionForRead ctx(&opCtx, NamespaceString(ns));
Database* db = ctx.getDb();
if (db) {
- prefetchPagesForReplicatedOp(&txn, db, op);
+ prefetchPagesForReplicatedOp(&opCtx, db, op);
}
} catch (const DBException& e) {
LOG(2) << "ignoring exception in prefetchOp(): " << redact(e) << endl;
@@ -468,7 +468,7 @@ void initializeWriterThread() {
// Schedules the writes to the oplog for 'ops' into threadPool. The caller must guarantee that 'ops'
// stays valid until all scheduled work in the thread pool completes.
-void scheduleWritesToOplog(OperationContext* txn,
+void scheduleWritesToOplog(OperationContext* opCtx,
OldThreadPool* threadPool,
const MultiApplier::Operations& ops) {
@@ -479,9 +479,9 @@ void scheduleWritesToOplog(OperationContext* txn,
return [&ops, begin, end] {
initializeWriterThread();
const auto txnHolder = cc().makeOperationContext();
- const auto txn = txnHolder.get();
- txn->lockState()->setShouldConflictWithSecondaryBatchApplication(false);
- txn->setReplicatedWrites(false);
+ const auto opCtx = txnHolder.get();
+ opCtx->lockState()->setShouldConflictWithSecondaryBatchApplication(false);
+ opCtx->setReplicatedWrites(false);
std::vector<BSONObj> docs;
docs.reserve(end - begin);
@@ -492,8 +492,8 @@ void scheduleWritesToOplog(OperationContext* txn,
}
fassertStatusOK(40141,
- StorageInterface::get(txn)->insertDocuments(
- txn, NamespaceString(rsOplogName), docs));
+ StorageInterface::get(opCtx)->insertDocuments(
+ opCtx, NamespaceString(rsOplogName), docs));
};
};
@@ -509,7 +509,7 @@ void scheduleWritesToOplog(OperationContext* txn,
// there would be no way to take advantage of multiple threads if a storage engine doesn't
// support document locking.
if (!enoughToMultiThread ||
- !txn->getServiceContext()->getGlobalStorageEngine()->supportsDocLocking()) {
+ !opCtx->getServiceContext()->getGlobalStorageEngine()->supportsDocLocking()) {
threadPool->schedule(makeOplogWriterForRange(0, ops.size()));
return;
@@ -536,24 +536,24 @@ public:
const CollatorInterface* collator = nullptr;
};
- CollectionProperties getCollectionProperties(OperationContext* txn,
+ CollectionProperties getCollectionProperties(OperationContext* opCtx,
const StringMapTraits::HashedKey& ns) {
auto it = _cache.find(ns);
if (it != _cache.end()) {
return it->second;
}
- auto collProperties = getCollectionPropertiesImpl(txn, ns.key());
+ auto collProperties = getCollectionPropertiesImpl(opCtx, ns.key());
_cache[ns] = collProperties;
return collProperties;
}
private:
- CollectionProperties getCollectionPropertiesImpl(OperationContext* txn, StringData ns) {
+ CollectionProperties getCollectionPropertiesImpl(OperationContext* opCtx, StringData ns) {
CollectionProperties collProperties;
- Lock::DBLock dbLock(txn->lockState(), nsToDatabaseSubstring(ns), MODE_IS);
- auto db = dbHolder().get(txn, ns);
+ Lock::DBLock dbLock(opCtx->lockState(), nsToDatabaseSubstring(ns), MODE_IS);
+ auto db = dbHolder().get(opCtx, ns);
if (!db) {
return collProperties;
}
@@ -573,7 +573,7 @@ private:
// This only modifies the isForCappedCollection field on each op. It does not alter the ops vector
// in any other way.
-void fillWriterVectors(OperationContext* txn,
+void fillWriterVectors(OperationContext* opCtx,
MultiApplier::Operations* ops,
std::vector<MultiApplier::OperationPtrs>* writerVectors) {
const bool supportsDocLocking =
@@ -587,7 +587,7 @@ void fillWriterVectors(OperationContext* txn,
uint32_t hash = hashedNs.hash();
if (op.isCrudOpType()) {
- auto collProperties = collPropertiesCache.getCollectionProperties(txn, hashedNs);
+ auto collProperties = collPropertiesCache.getCollectionProperties(opCtx, hashedNs);
// For doc locking engines, include the _id of the document in the hash so we get
// parallelism even if all writes are to a single collection.
@@ -620,7 +620,7 @@ void fillWriterVectors(OperationContext* txn,
// Applies a batch of oplog entries, by using a set of threads to apply the operations and then
// writes the oplog entries to the local oplog.
-OpTime SyncTail::multiApply(OperationContext* txn, MultiApplier::Operations ops) {
+OpTime SyncTail::multiApply(OperationContext* opCtx, MultiApplier::Operations ops) {
auto applyOperation = [this](MultiApplier::OperationPtrs* ops) -> Status {
_applyFunc(ops, this);
// This function is used by 3.2 initial sync and steady state data replication.
@@ -628,11 +628,11 @@ OpTime SyncTail::multiApply(OperationContext* txn, MultiApplier::Operations ops)
return Status::OK();
};
return fassertStatusOK(
- 34437, repl::multiApply(txn, _writerPool.get(), std::move(ops), applyOperation));
+ 34437, repl::multiApply(opCtx, _writerPool.get(), std::move(ops), applyOperation));
}
namespace {
-void tryToGoLiveAsASecondary(OperationContext* txn, ReplicationCoordinator* replCoord) {
+void tryToGoLiveAsASecondary(OperationContext* opCtx, ReplicationCoordinator* replCoord) {
if (replCoord->isInPrimaryOrSecondaryState()) {
return;
}
@@ -640,8 +640,8 @@ void tryToGoLiveAsASecondary(OperationContext* txn, ReplicationCoordinator* repl
// This needs to happen after the attempt so readers can be sure we've already tried.
ON_BLOCK_EXIT([] { attemptsToBecomeSecondary.increment(); });
- ScopedTransaction transaction(txn, MODE_S);
- Lock::GlobalRead readLock(txn->lockState());
+ ScopedTransaction transaction(opCtx, MODE_S);
+ Lock::GlobalRead readLock(opCtx->lockState());
if (replCoord->getMaintenanceMode()) {
LOG(1) << "Can't go live (tryToGoLiveAsASecondary) as maintenance mode is active.";
@@ -657,7 +657,7 @@ void tryToGoLiveAsASecondary(OperationContext* txn, ReplicationCoordinator* repl
}
// We can't go to SECONDARY until we reach minvalid.
- if (replCoord->getMyLastAppliedOpTime() < StorageInterface::get(txn)->getMinValid(txn)) {
+ if (replCoord->getMyLastAppliedOpTime() < StorageInterface::get(opCtx)->getMinValid(opCtx)) {
return;
}
@@ -697,13 +697,13 @@ public:
private:
void run() {
Client::initThread("ReplBatcher");
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- const auto replCoord = ReplicationCoordinator::get(&txn);
- const auto fastClockSource = txn.getServiceContext()->getFastClockSource();
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ const auto replCoord = ReplicationCoordinator::get(&opCtx);
+ const auto fastClockSource = opCtx.getServiceContext()->getFastClockSource();
const auto oplogMaxSize = fassertStatusOK(
40301,
- StorageInterface::get(&txn)->getOplogMaxSize(&txn, NamespaceString(rsOplogName)));
+ StorageInterface::get(&opCtx)->getOplogMaxSize(&opCtx, NamespaceString(rsOplogName)));
// Batches are limited to 10% of the oplog.
BatchLimits batchLimits;
@@ -720,7 +720,7 @@ private:
OpQueue ops;
// tryPopAndWaitForMore adds to ops and returns true when we need to end a batch early.
- while (!_syncTail->tryPopAndWaitForMore(&txn, &ops, batchLimits)) {
+ while (!_syncTail->tryPopAndWaitForMore(&opCtx, &ops, batchLimits)) {
}
if (ops.empty() && !ops.mustShutdown()) {
@@ -755,8 +755,8 @@ private:
void SyncTail::oplogApplication(ReplicationCoordinator* replCoord) {
OpQueueBatcher batcher(this);
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
std::unique_ptr<ApplyBatchFinalizer> finalizer{
getGlobalServiceContext()->getGlobalStorageEngine()->isDurable()
? new ApplyBatchFinalizerForJournal(replCoord)
@@ -774,7 +774,7 @@ void SyncTail::oplogApplication(ReplicationCoordinator* replCoord) {
sleepmillis(10);
}
- tryToGoLiveAsASecondary(&txn, replCoord);
+ tryToGoLiveAsASecondary(&opCtx, replCoord);
long long termWhenBufferIsEmpty = replCoord->getTerm();
// Blocks up to a second waiting for a batch to be ready to apply. If one doesn't become
@@ -788,7 +788,7 @@ void SyncTail::oplogApplication(ReplicationCoordinator* replCoord) {
continue;
}
// Signal drain complete if we're in Draining state and the buffer is empty.
- replCoord->signalDrainComplete(&txn, termWhenBufferIsEmpty);
+ replCoord->signalDrainComplete(&opCtx, termWhenBufferIsEmpty);
continue; // Try again.
}
@@ -813,13 +813,13 @@ void SyncTail::oplogApplication(ReplicationCoordinator* replCoord) {
stdx::lock_guard<SimpleMutex> fsynclk(filesLockedFsync);
// Do the work.
- multiApply(&txn, ops.releaseBatch());
+ multiApply(&opCtx, ops.releaseBatch());
// Update various things that care about our last applied optime. Tests rely on 2 happening
// before 3 even though it isn't strictly necessary. The order of 1 doesn't matter.
- setNewTimestamp(txn.getServiceContext(), lastOpTimeInBatch.getTimestamp()); // 1
- StorageInterface::get(&txn)->setAppliedThrough(&txn, lastOpTimeInBatch); // 2
- finalizer->record(lastOpTimeInBatch); // 3
+ setNewTimestamp(opCtx.getServiceContext(), lastOpTimeInBatch.getTimestamp()); // 1
+ StorageInterface::get(&opCtx)->setAppliedThrough(&opCtx, lastOpTimeInBatch); // 2
+ finalizer->record(lastOpTimeInBatch); // 3
}
}
@@ -830,13 +830,13 @@ void SyncTail::oplogApplication(ReplicationCoordinator* replCoord) {
// This function also blocks 1 second waiting for new ops to appear in the bgsync
// queue. We don't block forever so that we can periodically check for things like shutdown or
// reconfigs.
-bool SyncTail::tryPopAndWaitForMore(OperationContext* txn,
+bool SyncTail::tryPopAndWaitForMore(OperationContext* opCtx,
SyncTail::OpQueue* ops,
const BatchLimits& limits) {
{
BSONObj op;
// Check to see if there are ops waiting in the bgsync queue
- bool peek_success = peek(txn, &op);
+ bool peek_success = peek(opCtx, &op);
if (!peek_success) {
// If we don't have anything in the queue, wait a bit for something to appear.
if (ops->empty()) {
@@ -908,7 +908,7 @@ bool SyncTail::tryPopAndWaitForMore(OperationContext* txn,
(!entry.ns.empty() && nsToCollectionSubstring(entry.ns) == "system.indexes")) {
if (ops->getCount() == 1) {
// apply commands one-at-a-time
- _networkQueue->consume(txn);
+ _networkQueue->consume(opCtx);
} else {
// This op must be processed alone, but we already had ops in the queue so we can't
// include it in this batch. Since we didn't call consume(), we'll see this again next
@@ -921,7 +921,7 @@ bool SyncTail::tryPopAndWaitForMore(OperationContext* txn,
}
// We are going to apply this Op.
- _networkQueue->consume(txn);
+ _networkQueue->consume(opCtx);
// Go back for more ops, unless we've hit the limit.
return ops->getCount() >= limits.ops;
@@ -935,7 +935,7 @@ OldThreadPool* SyncTail::getWriterPool() {
return _writerPool.get();
}
-BSONObj SyncTail::getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o) {
+BSONObj SyncTail::getMissingDoc(OperationContext* opCtx, Database* db, const BSONObj& o) {
OplogReader missingObjReader; // why are we using OplogReader to run a non-oplog query?
const char* ns = o.getStringField("ns");
@@ -1004,18 +1004,18 @@ BSONObj SyncTail::getMissingDoc(OperationContext* txn, Database* db, const BSONO
str::stream() << "Can no longer connect to initial sync source: " << _hostname);
}
-bool SyncTail::shouldRetry(OperationContext* txn, const BSONObj& o) {
+bool SyncTail::shouldRetry(OperationContext* opCtx, const BSONObj& o) {
const NamespaceString nss(o.getStringField("ns"));
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
// Take an X lock on the database in order to preclude other modifications.
// Also, the database might not exist yet, so create it.
- AutoGetOrCreateDb autoDb(txn, nss.db(), MODE_X);
+ AutoGetOrCreateDb autoDb(opCtx, nss.db(), MODE_X);
Database* const db = autoDb.getDb();
// we don't have the object yet, which is possible on initial sync. get it.
log() << "adding missing object" << endl; // rare enough we can log
- BSONObj missingObj = getMissingDoc(txn, db, o);
+ BSONObj missingObj = getMissingDoc(opCtx, db, o);
if (missingObj.isEmpty()) {
log() << "missing object not found on source."
@@ -1025,13 +1025,13 @@ bool SyncTail::shouldRetry(OperationContext* txn, const BSONObj& o) {
return false;
} else {
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
- Collection* const coll = db->getOrCreateCollection(txn, nss.toString());
+ Collection* const coll = db->getOrCreateCollection(opCtx, nss.toString());
invariant(coll);
OpDebug* const nullOpDebug = nullptr;
- Status status = coll->insertDocument(txn, missingObj, nullOpDebug, true);
+ Status status = coll->insertDocument(opCtx, missingObj, nullOpDebug, true);
uassert(15917,
str::stream() << "failed to insert missing doc: " << status.toString(),
status.isOK());
@@ -1042,7 +1042,7 @@ bool SyncTail::shouldRetry(OperationContext* txn, const BSONObj& o) {
return true;
}
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "InsertRetry", nss.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "InsertRetry", nss.ns());
// fixes compile errors on GCC - see SERVER-18219 for details
MONGO_UNREACHABLE;
@@ -1051,22 +1051,22 @@ bool SyncTail::shouldRetry(OperationContext* txn, const BSONObj& o) {
// This free function is used by the writer threads to apply each op
void multiSyncApply(MultiApplier::OperationPtrs* ops, SyncTail*) {
initializeWriterThread();
- auto txn = cc().makeOperationContext();
- auto syncApply = [](OperationContext* txn, const BSONObj& op, bool inSteadyStateReplication) {
- return SyncTail::syncApply(txn, op, inSteadyStateReplication);
+ auto opCtx = cc().makeOperationContext();
+ auto syncApply = [](OperationContext* opCtx, const BSONObj& op, bool inSteadyStateReplication) {
+ return SyncTail::syncApply(opCtx, op, inSteadyStateReplication);
};
- fassertNoTrace(16359, multiSyncApply_noAbort(txn.get(), ops, syncApply));
+ fassertNoTrace(16359, multiSyncApply_noAbort(opCtx.get(), ops, syncApply));
}
-Status multiSyncApply_noAbort(OperationContext* txn,
+Status multiSyncApply_noAbort(OperationContext* opCtx,
MultiApplier::OperationPtrs* oplogEntryPointers,
SyncApplyFn syncApply) {
- txn->setReplicatedWrites(false);
- DisableDocumentValidation validationDisabler(txn);
+ opCtx->setReplicatedWrites(false);
+ DisableDocumentValidation validationDisabler(opCtx);
// allow us to get through the magic barrier
- txn->lockState()->setShouldConflictWithSecondaryBatchApplication(false);
+ opCtx->lockState()->setShouldConflictWithSecondaryBatchApplication(false);
if (oplogEntryPointers->size() > 1) {
std::stable_sort(oplogEntryPointers->begin(),
@@ -1125,7 +1125,7 @@ Status multiSyncApply_noAbort(OperationContext* txn,
try {
// Apply the group of inserts.
uassertStatusOK(
- syncApply(txn, groupedInsertBuilder.done(), inSteadyStateReplication));
+ syncApply(opCtx, groupedInsertBuilder.done(), inSteadyStateReplication));
// It succeeded, advance the oplogEntriesIterator to the end of the
// group of inserts.
oplogEntriesIterator = endOfGroupableOpsIterator - 1;
@@ -1145,7 +1145,7 @@ Status multiSyncApply_noAbort(OperationContext* txn,
try {
// Apply an individual (non-grouped) op.
- const Status status = syncApply(txn, entry->raw, inSteadyStateReplication);
+ const Status status = syncApply(opCtx, entry->raw, inSteadyStateReplication);
if (!status.isOK()) {
severe() << "Error applying operation (" << redact(entry->raw)
@@ -1165,28 +1165,28 @@ Status multiSyncApply_noAbort(OperationContext* txn,
// This free function is used by the initial sync writer threads to apply each op
void multiInitialSyncApply_abortOnFailure(MultiApplier::OperationPtrs* ops, SyncTail* st) {
initializeWriterThread();
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
AtomicUInt32 fetchCount(0);
- fassertNoTrace(15915, multiInitialSyncApply_noAbort(txn.get(), ops, st, &fetchCount));
+ fassertNoTrace(15915, multiInitialSyncApply_noAbort(opCtx.get(), ops, st, &fetchCount));
}
Status multiInitialSyncApply(MultiApplier::OperationPtrs* ops,
SyncTail* st,
AtomicUInt32* fetchCount) {
initializeWriterThread();
- auto txn = cc().makeOperationContext();
- return multiInitialSyncApply_noAbort(txn.get(), ops, st, fetchCount);
+ auto opCtx = cc().makeOperationContext();
+ return multiInitialSyncApply_noAbort(opCtx.get(), ops, st, fetchCount);
}
-Status multiInitialSyncApply_noAbort(OperationContext* txn,
+Status multiInitialSyncApply_noAbort(OperationContext* opCtx,
MultiApplier::OperationPtrs* ops,
SyncTail* st,
AtomicUInt32* fetchCount) {
- txn->setReplicatedWrites(false);
- DisableDocumentValidation validationDisabler(txn);
+ opCtx->setReplicatedWrites(false);
+ DisableDocumentValidation validationDisabler(opCtx);
// allow us to get through the magic barrier
- txn->lockState()->setShouldConflictWithSecondaryBatchApplication(false);
+ opCtx->lockState()->setShouldConflictWithSecondaryBatchApplication(false);
// This function is only called in initial sync, as its name suggests.
const bool inSteadyStateReplication = false;
@@ -1194,7 +1194,7 @@ Status multiInitialSyncApply_noAbort(OperationContext* txn,
for (auto it = ops->begin(); it != ops->end(); ++it) {
auto& entry = **it;
try {
- const Status s = SyncTail::syncApply(txn, entry.raw, inSteadyStateReplication);
+ const Status s = SyncTail::syncApply(opCtx, entry.raw, inSteadyStateReplication);
if (!s.isOK()) {
// Don't retry on commands.
if (entry.isCommand()) {
@@ -1205,8 +1205,9 @@ Status multiInitialSyncApply_noAbort(OperationContext* txn,
// We might need to fetch the missing docs from the sync source.
fetchCount->fetchAndAdd(1);
- if (st->shouldRetry(txn, entry.raw)) {
- const Status s2 = SyncTail::syncApply(txn, entry.raw, inSteadyStateReplication);
+ if (st->shouldRetry(opCtx, entry.raw)) {
+ const Status s2 =
+ SyncTail::syncApply(opCtx, entry.raw, inSteadyStateReplication);
if (!s2.isOK()) {
severe() << "Error applying operation (" << redact(entry.raw)
<< "): " << redact(s2);
@@ -1234,11 +1235,11 @@ Status multiInitialSyncApply_noAbort(OperationContext* txn,
return Status::OK();
}
-StatusWith<OpTime> multiApply(OperationContext* txn,
+StatusWith<OpTime> multiApply(OperationContext* opCtx,
OldThreadPool* workerPool,
MultiApplier::Operations ops,
MultiApplier::ApplyOperationFn applyOperation) {
- if (!txn) {
+ if (!opCtx) {
return {ErrorCodes::BadValue, "invalid operation context"};
}
@@ -1259,14 +1260,14 @@ StatusWith<OpTime> multiApply(OperationContext* txn,
prefetchOps(ops, workerPool);
}
- auto storage = StorageInterface::get(txn);
+ auto storage = StorageInterface::get(opCtx);
LOG(2) << "replication batch size is " << ops.size();
// Stop all readers until we're done. This also prevents doc-locking engines from deleting old
// entries from the oplog until we finish writing.
- Lock::ParallelBatchWriterMode pbwm(txn->lockState());
+ Lock::ParallelBatchWriterMode pbwm(opCtx->lockState());
- auto replCoord = ReplicationCoordinator::get(txn);
+ auto replCoord = ReplicationCoordinator::get(opCtx);
if (replCoord->getApplierState() == ReplicationCoordinator::ApplierState::Stopped) {
severe() << "attempting to replicate ops while primary";
return {ErrorCodes::CannotApplyOplogWhilePrimary,
@@ -1280,14 +1281,14 @@ StatusWith<OpTime> multiApply(OperationContext* txn,
std::vector<MultiApplier::OperationPtrs> writerVectors(workerPool->getNumThreads());
ON_BLOCK_EXIT([&] { workerPool->join(); });
- storage->setOplogDeleteFromPoint(txn, ops.front().ts.timestamp());
- scheduleWritesToOplog(txn, workerPool, ops);
- fillWriterVectors(txn, &ops, &writerVectors);
+ storage->setOplogDeleteFromPoint(opCtx, ops.front().ts.timestamp());
+ scheduleWritesToOplog(opCtx, workerPool, ops);
+ fillWriterVectors(opCtx, &ops, &writerVectors);
workerPool->join();
- storage->setOplogDeleteFromPoint(txn, Timestamp());
- storage->setMinValidToAtLeast(txn, ops.back().getOpTime());
+ storage->setOplogDeleteFromPoint(opCtx, Timestamp());
+ storage->setMinValidToAtLeast(opCtx, ops.back().getOpTime());
applyOps(writerVectors, workerPool, applyOperation, &statusVector);
}
diff --git a/src/mongo/db/repl/sync_tail.h b/src/mongo/db/repl/sync_tail.h
index 98485782868..1f4aa0e12c4 100644
--- a/src/mongo/db/repl/sync_tail.h
+++ b/src/mongo/db/repl/sync_tail.h
@@ -70,7 +70,7 @@ public:
* 'opCounter' is used to update server status metrics.
* Returns failure status if the op was an update that could not be applied.
*/
- using ApplyOperationInLockFn = stdx::function<Status(OperationContext* txn,
+ using ApplyOperationInLockFn = stdx::function<Status(OperationContext* opCtx,
Database* db,
const BSONObj& opObj,
bool inSteadyStateReplication,
@@ -100,17 +100,19 @@ public:
* Functions for applying operations/commands and increment server status counters may
* be overridden for testing.
*/
- static Status syncApply(OperationContext* txn,
+ static Status syncApply(OperationContext* opCtx,
const BSONObj& o,
bool inSteadyStateReplication,
ApplyOperationInLockFn applyOperationInLock,
ApplyCommandInLockFn applyCommandInLock,
IncrementOpsAppliedStatsFn incrementOpsAppliedStats);
- static Status syncApply(OperationContext* txn, const BSONObj& o, bool inSteadyStateReplication);
+ static Status syncApply(OperationContext* opCtx,
+ const BSONObj& o,
+ bool inSteadyStateReplication);
void oplogApplication(ReplicationCoordinator* replCoord);
- bool peek(OperationContext* txn, BSONObj* obj);
+ bool peek(OperationContext* opCtx, BSONObj* obj);
class OpQueue {
public:
@@ -195,17 +197,17 @@ public:
* If ops is empty on entry and nothing can be added yet, will wait up to a second before
* returning true.
*/
- bool tryPopAndWaitForMore(OperationContext* txn, OpQueue* ops, const BatchLimits& limits);
+ bool tryPopAndWaitForMore(OperationContext* opCtx, OpQueue* ops, const BatchLimits& limits);
/**
* Fetch a single document referenced in the operation from the sync source.
*/
- virtual BSONObj getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o);
+ virtual BSONObj getMissingDoc(OperationContext* opCtx, Database* db, const BSONObj& o);
/**
* If applyOperation_inlock should be called again after an update fails.
*/
- virtual bool shouldRetry(OperationContext* txn, const BSONObj& o);
+ virtual bool shouldRetry(OperationContext* opCtx, const BSONObj& o);
void setHostname(const std::string& hostname);
/**
@@ -222,7 +224,7 @@ protected:
// Apply a batch of operations, using multiple threads.
// Returns the last OpTime applied during the apply batch, ops.end["ts"] basically.
- OpTime multiApply(OperationContext* txn, MultiApplier::Operations ops);
+ OpTime multiApply(OperationContext* opCtx, MultiApplier::Operations ops);
private:
class OpQueueBatcher;
@@ -247,7 +249,7 @@ private:
*
* Shared between here and MultiApplier.
*/
-StatusWith<OpTime> multiApply(OperationContext* txn,
+StatusWith<OpTime> multiApply(OperationContext* opCtx,
OldThreadPool* workerPool,
MultiApplier::Operations ops,
MultiApplier::ApplyOperationFn applyOperation);
@@ -271,9 +273,9 @@ Status multiInitialSyncApply(MultiApplier::OperationPtrs* ops,
* Accepts an external operation context and a function with the same argument list as
* SyncTail::syncApply.
*/
-using SyncApplyFn =
- stdx::function<Status(OperationContext* txn, const BSONObj& o, bool inSteadyStateReplication)>;
-Status multiSyncApply_noAbort(OperationContext* txn,
+using SyncApplyFn = stdx::function<Status(
+ OperationContext* opCtx, const BSONObj& o, bool inSteadyStateReplication)>;
+Status multiSyncApply_noAbort(OperationContext* opCtx,
MultiApplier::OperationPtrs* ops,
SyncApplyFn syncApply);
@@ -281,7 +283,7 @@ Status multiSyncApply_noAbort(OperationContext* txn,
* Testing-only version of multiInitialSyncApply that accepts an external operation context and
* returns an error instead of aborting.
*/
-Status multiInitialSyncApply_noAbort(OperationContext* txn,
+Status multiInitialSyncApply_noAbort(OperationContext* opCtx,
MultiApplier::OperationPtrs* ops,
SyncTail* st,
AtomicUInt32* fetchCount);
diff --git a/src/mongo/db/repl/sync_tail_test.cpp b/src/mongo/db/repl/sync_tail_test.cpp
index b6946a4edc2..b1e62a2efa5 100644
--- a/src/mongo/db/repl/sync_tail_test.cpp
+++ b/src/mongo/db/repl/sync_tail_test.cpp
@@ -69,7 +69,7 @@ using namespace mongo::repl;
class SyncTailTest : public ServiceContextMongoDTest {
protected:
void _testSyncApplyInsertDocument(LockMode expectedMode);
- ServiceContext::UniqueOperationContext _txn;
+ ServiceContext::UniqueOperationContext _opCtx;
unsigned int _opsApplied;
SyncTail::ApplyOperationInLockFn _applyOp;
SyncTail::ApplyCommandInLockFn _applyCmd;
@@ -91,7 +91,7 @@ protected:
class SyncTailWithLocalDocumentFetcher : public SyncTail {
public:
SyncTailWithLocalDocumentFetcher(const BSONObj& document);
- BSONObj getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o) override;
+ BSONObj getMissingDoc(OperationContext* opCtx, Database* db, const BSONObj& o) override;
private:
BSONObj _document;
@@ -103,7 +103,7 @@ private:
class SyncTailWithOperationContextChecker : public SyncTail {
public:
SyncTailWithOperationContextChecker();
- bool shouldRetry(OperationContext* txn, const BSONObj& o) override;
+ bool shouldRetry(OperationContext* opCtx, const BSONObj& o) override;
};
void SyncTailTest::setUp() {
@@ -122,19 +122,19 @@ void SyncTailTest::setUp() {
const std::vector<BSONObj>&) { return Status::OK(); };
StorageInterface::set(service, std::move(storageInterface));
- _txn = cc().makeOperationContext();
+ _opCtx = cc().makeOperationContext();
_opsApplied = 0;
- _applyOp = [](OperationContext* txn,
+ _applyOp = [](OperationContext* opCtx,
Database* db,
const BSONObj& op,
bool inSteadyStateReplication,
stdx::function<void()>) { return Status::OK(); };
- _applyCmd = [](OperationContext* txn, const BSONObj& op, bool) { return Status::OK(); };
+ _applyCmd = [](OperationContext* opCtx, const BSONObj& op, bool) { return Status::OK(); };
_incOps = [this]() { _opsApplied++; };
}
void SyncTailTest::tearDown() {
- _txn.reset();
+ _opCtx.reset();
ServiceContextMongoDTest::tearDown();
_storageInterface = nullptr;
}
@@ -151,10 +151,10 @@ BSONObj SyncTailWithLocalDocumentFetcher::getMissingDoc(OperationContext*,
SyncTailWithOperationContextChecker::SyncTailWithOperationContextChecker()
: SyncTail(nullptr, SyncTail::MultiSyncApplyFunc(), nullptr) {}
-bool SyncTailWithOperationContextChecker::shouldRetry(OperationContext* txn, const BSONObj&) {
- ASSERT_FALSE(txn->writesAreReplicated());
- ASSERT_FALSE(txn->lockState()->shouldConflictWithSecondaryBatchApplication());
- ASSERT_TRUE(documentValidationDisabled(txn));
+bool SyncTailWithOperationContextChecker::shouldRetry(OperationContext* opCtx, const BSONObj&) {
+ ASSERT_FALSE(opCtx->writesAreReplicated());
+ ASSERT_FALSE(opCtx->lockState()->shouldConflictWithSecondaryBatchApplication());
+ ASSERT_TRUE(documentValidationDisabled(opCtx));
return false;
}
@@ -173,21 +173,21 @@ CollectionOptions createOplogCollectionOptions() {
* Create test collection.
* Returns collection.
*/
-void createCollection(OperationContext* txn,
+void createCollection(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionOptions& options) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dblk(txn->lockState(), nss.db(), MODE_X);
- OldClientContext ctx(txn, nss.ns());
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dblk(opCtx->lockState(), nss.db(), MODE_X);
+ OldClientContext ctx(opCtx, nss.ns());
auto db = ctx.db();
ASSERT_TRUE(db);
- mongo::WriteUnitOfWork wuow(txn);
- auto coll = db->createCollection(txn, nss.ns(), options);
+ mongo::WriteUnitOfWork wuow(opCtx);
+ auto coll = db->createCollection(opCtx, nss.ns(), options);
ASSERT_TRUE(coll);
wuow.commit();
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollection", nss.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "createCollection", nss.ns());
}
/**
@@ -250,7 +250,7 @@ OplogEntry makeUpdateDocumentOplogEntry(OpTime opTime,
return OplogEntry(bob.obj());
}
-Status failedApplyCommand(OperationContext* txn, const BSONObj& theOperation, bool) {
+Status failedApplyCommand(OperationContext* opCtx, const BSONObj& theOperation, bool) {
FAIL("applyCommand unexpectedly invoked.");
return Status::OK();
}
@@ -258,12 +258,12 @@ Status failedApplyCommand(OperationContext* txn, const BSONObj& theOperation, bo
TEST_F(SyncTailTest, SyncApplyNoNamespaceBadOp) {
const BSONObj op = BSON("op"
<< "x");
- ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, _applyOp, _applyCmd, _incOps));
+ ASSERT_OK(SyncTail::syncApply(_opCtx.get(), op, false, _applyOp, _applyCmd, _incOps));
ASSERT_EQUALS(0U, _opsApplied);
}
TEST_F(SyncTailTest, SyncApplyNoNamespaceNoOp) {
- ASSERT_OK(SyncTail::syncApply(_txn.get(),
+ ASSERT_OK(SyncTail::syncApply(_opCtx.get(),
BSON("op"
<< "n"),
false));
@@ -275,8 +275,9 @@ TEST_F(SyncTailTest, SyncApplyBadOp) {
<< "x"
<< "ns"
<< "test.t");
- ASSERT_EQUALS(ErrorCodes::BadValue,
- SyncTail::syncApply(_txn.get(), op, false, _applyOp, _applyCmd, _incOps).code());
+ ASSERT_EQUALS(
+ ErrorCodes::BadValue,
+ SyncTail::syncApply(_opCtx.get(), op, false, _applyOp, _applyCmd, _incOps).code());
ASSERT_EQUALS(0U, _opsApplied);
}
@@ -286,24 +287,24 @@ TEST_F(SyncTailTest, SyncApplyNoOp) {
<< "ns"
<< "test.t");
bool applyOpCalled = false;
- SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* txn,
+ SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* opCtx,
Database* db,
const BSONObj& theOperation,
bool inSteadyStateReplication,
stdx::function<void()>) {
applyOpCalled = true;
- ASSERT_TRUE(txn);
- ASSERT_TRUE(txn->lockState()->isDbLockedForMode("test", MODE_X));
- ASSERT_FALSE(txn->writesAreReplicated());
- ASSERT_TRUE(documentValidationDisabled(txn));
+ ASSERT_TRUE(opCtx);
+ ASSERT_TRUE(opCtx->lockState()->isDbLockedForMode("test", MODE_X));
+ ASSERT_FALSE(opCtx->writesAreReplicated());
+ ASSERT_TRUE(documentValidationDisabled(opCtx));
ASSERT_TRUE(db);
ASSERT_BSONOBJ_EQ(op, theOperation);
ASSERT_FALSE(inSteadyStateReplication);
return Status::OK();
};
- ASSERT_TRUE(_txn->writesAreReplicated());
- ASSERT_FALSE(documentValidationDisabled(_txn.get()));
- ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, failedApplyCommand, _incOps));
+ ASSERT_TRUE(_opCtx->writesAreReplicated());
+ ASSERT_FALSE(documentValidationDisabled(_opCtx.get()));
+ ASSERT_OK(SyncTail::syncApply(_opCtx.get(), op, false, applyOp, failedApplyCommand, _incOps));
ASSERT_TRUE(applyOpCalled);
}
@@ -313,7 +314,7 @@ TEST_F(SyncTailTest, SyncApplyNoOpApplyOpThrowsException) {
<< "ns"
<< "test.t");
int applyOpCalled = 0;
- SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* txn,
+ SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* opCtx,
Database* db,
const BSONObj& theOperation,
bool inSteadyStateReplication,
@@ -324,7 +325,7 @@ TEST_F(SyncTailTest, SyncApplyNoOpApplyOpThrowsException) {
}
return Status::OK();
};
- ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, failedApplyCommand, _incOps));
+ ASSERT_OK(SyncTail::syncApply(_opCtx.get(), op, false, applyOp, failedApplyCommand, _incOps));
ASSERT_EQUALS(5, applyOpCalled);
}
@@ -334,25 +335,25 @@ void SyncTailTest::_testSyncApplyInsertDocument(LockMode expectedMode) {
<< "ns"
<< "test.t");
bool applyOpCalled = false;
- SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* txn,
+ SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* opCtx,
Database* db,
const BSONObj& theOperation,
bool inSteadyStateReplication,
stdx::function<void()>) {
applyOpCalled = true;
- ASSERT_TRUE(txn);
- ASSERT_TRUE(txn->lockState()->isDbLockedForMode("test", expectedMode));
- ASSERT_TRUE(txn->lockState()->isCollectionLockedForMode("test.t", expectedMode));
- ASSERT_FALSE(txn->writesAreReplicated());
- ASSERT_TRUE(documentValidationDisabled(txn));
+ ASSERT_TRUE(opCtx);
+ ASSERT_TRUE(opCtx->lockState()->isDbLockedForMode("test", expectedMode));
+ ASSERT_TRUE(opCtx->lockState()->isCollectionLockedForMode("test.t", expectedMode));
+ ASSERT_FALSE(opCtx->writesAreReplicated());
+ ASSERT_TRUE(documentValidationDisabled(opCtx));
ASSERT_TRUE(db);
ASSERT_BSONOBJ_EQ(op, theOperation);
ASSERT_TRUE(inSteadyStateReplication);
return Status::OK();
};
- ASSERT_TRUE(_txn->writesAreReplicated());
- ASSERT_FALSE(documentValidationDisabled(_txn.get()));
- ASSERT_OK(SyncTail::syncApply(_txn.get(), op, true, applyOp, failedApplyCommand, _incOps));
+ ASSERT_TRUE(_opCtx->writesAreReplicated());
+ ASSERT_FALSE(documentValidationDisabled(_opCtx.get()));
+ ASSERT_OK(SyncTail::syncApply(_opCtx.get(), op, true, applyOp, failedApplyCommand, _incOps));
ASSERT_TRUE(applyOpCalled);
}
@@ -362,9 +363,9 @@ TEST_F(SyncTailTest, SyncApplyInsertDocumentDatabaseMissing) {
TEST_F(SyncTailTest, SyncApplyInsertDocumentCollectionMissing) {
{
- Lock::GlobalWrite globalLock(_txn->lockState());
+ Lock::GlobalWrite globalLock(_opCtx->lockState());
bool justCreated = false;
- Database* db = dbHolder().openDb(_txn.get(), "test", &justCreated);
+ Database* db = dbHolder().openDb(_opCtx.get(), "test", &justCreated);
ASSERT_TRUE(db);
ASSERT_TRUE(justCreated);
}
@@ -373,12 +374,12 @@ TEST_F(SyncTailTest, SyncApplyInsertDocumentCollectionMissing) {
TEST_F(SyncTailTest, SyncApplyInsertDocumentCollectionExists) {
{
- Lock::GlobalWrite globalLock(_txn->lockState());
+ Lock::GlobalWrite globalLock(_opCtx->lockState());
bool justCreated = false;
- Database* db = dbHolder().openDb(_txn.get(), "test", &justCreated);
+ Database* db = dbHolder().openDb(_opCtx.get(), "test", &justCreated);
ASSERT_TRUE(db);
ASSERT_TRUE(justCreated);
- Collection* collection = db->createCollection(_txn.get(), "test.t");
+ Collection* collection = db->createCollection(_opCtx.get(), "test.t");
ASSERT_TRUE(collection);
}
_testSyncApplyInsertDocument(MODE_IX);
@@ -390,24 +391,24 @@ TEST_F(SyncTailTest, SyncApplyIndexBuild) {
<< "ns"
<< "test.system.indexes");
bool applyOpCalled = false;
- SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* txn,
+ SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* opCtx,
Database* db,
const BSONObj& theOperation,
bool inSteadyStateReplication,
stdx::function<void()>) {
applyOpCalled = true;
- ASSERT_TRUE(txn);
- ASSERT_TRUE(txn->lockState()->isDbLockedForMode("test", MODE_X));
- ASSERT_FALSE(txn->writesAreReplicated());
- ASSERT_TRUE(documentValidationDisabled(txn));
+ ASSERT_TRUE(opCtx);
+ ASSERT_TRUE(opCtx->lockState()->isDbLockedForMode("test", MODE_X));
+ ASSERT_FALSE(opCtx->writesAreReplicated());
+ ASSERT_TRUE(documentValidationDisabled(opCtx));
ASSERT_TRUE(db);
ASSERT_BSONOBJ_EQ(op, theOperation);
ASSERT_FALSE(inSteadyStateReplication);
return Status::OK();
};
- ASSERT_TRUE(_txn->writesAreReplicated());
- ASSERT_FALSE(documentValidationDisabled(_txn.get()));
- ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, failedApplyCommand, _incOps));
+ ASSERT_TRUE(_opCtx->writesAreReplicated());
+ ASSERT_FALSE(documentValidationDisabled(_opCtx.get()));
+ ASSERT_OK(SyncTail::syncApply(_opCtx.get(), op, false, applyOp, failedApplyCommand, _incOps));
ASSERT_TRUE(applyOpCalled);
}
@@ -417,7 +418,7 @@ TEST_F(SyncTailTest, SyncApplyCommand) {
<< "ns"
<< "test.t");
bool applyCmdCalled = false;
- SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* txn,
+ SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* opCtx,
Database* db,
const BSONObj& theOperation,
bool inSteadyStateReplication,
@@ -426,18 +427,18 @@ TEST_F(SyncTailTest, SyncApplyCommand) {
return Status::OK();
};
SyncTail::ApplyCommandInLockFn applyCmd =
- [&](OperationContext* txn, const BSONObj& theOperation, bool inSteadyStateReplication) {
+ [&](OperationContext* opCtx, const BSONObj& theOperation, bool inSteadyStateReplication) {
applyCmdCalled = true;
- ASSERT_TRUE(txn);
- ASSERT_TRUE(txn->lockState()->isW());
- ASSERT_TRUE(txn->writesAreReplicated());
- ASSERT_FALSE(documentValidationDisabled(txn));
+ ASSERT_TRUE(opCtx);
+ ASSERT_TRUE(opCtx->lockState()->isW());
+ ASSERT_TRUE(opCtx->writesAreReplicated());
+ ASSERT_FALSE(documentValidationDisabled(opCtx));
ASSERT_BSONOBJ_EQ(op, theOperation);
return Status::OK();
};
- ASSERT_TRUE(_txn->writesAreReplicated());
- ASSERT_FALSE(documentValidationDisabled(_txn.get()));
- ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, applyCmd, _incOps));
+ ASSERT_TRUE(_opCtx->writesAreReplicated());
+ ASSERT_FALSE(documentValidationDisabled(_opCtx.get()));
+ ASSERT_OK(SyncTail::syncApply(_opCtx.get(), op, false, applyOp, applyCmd, _incOps));
ASSERT_TRUE(applyCmdCalled);
ASSERT_EQUALS(1U, _opsApplied);
}
@@ -448,7 +449,7 @@ TEST_F(SyncTailTest, SyncApplyCommandThrowsException) {
<< "ns"
<< "test.t");
int applyCmdCalled = 0;
- SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* txn,
+ SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* opCtx,
Database* db,
const BSONObj& theOperation,
bool inSteadyStateReplication,
@@ -457,14 +458,14 @@ TEST_F(SyncTailTest, SyncApplyCommandThrowsException) {
return Status::OK();
};
SyncTail::ApplyCommandInLockFn applyCmd =
- [&](OperationContext* txn, const BSONObj& theOperation, bool inSteadyStateReplication) {
+ [&](OperationContext* opCtx, const BSONObj& theOperation, bool inSteadyStateReplication) {
applyCmdCalled++;
if (applyCmdCalled < 5) {
throw WriteConflictException();
}
return Status::OK();
};
- ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, applyCmd, _incOps));
+ ASSERT_OK(SyncTail::syncApply(_opCtx.get(), op, false, applyOp, applyCmd, _incOps));
ASSERT_EQUALS(5, applyCmdCalled);
ASSERT_EQUALS(1U, _opsApplied);
}
@@ -479,14 +480,14 @@ TEST_F(SyncTailTest, MultiApplyReturnsBadValueOnNullOperationContext) {
TEST_F(SyncTailTest, MultiApplyReturnsBadValueOnNullWriterPool) {
auto op = makeCreateCollectionOplogEntry({Timestamp(Seconds(1), 0), 1LL});
- auto status = multiApply(_txn.get(), nullptr, {op}, noopApplyOperationFn).getStatus();
+ auto status = multiApply(_opCtx.get(), nullptr, {op}, noopApplyOperationFn).getStatus();
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "invalid worker pool");
}
TEST_F(SyncTailTest, MultiApplyReturnsEmptyArrayOperationWhenNoOperationsAreGiven) {
auto writerPool = SyncTail::makeWriterPool();
- auto status = multiApply(_txn.get(), writerPool.get(), {}, noopApplyOperationFn).getStatus();
+ auto status = multiApply(_opCtx.get(), writerPool.get(), {}, noopApplyOperationFn).getStatus();
ASSERT_EQUALS(ErrorCodes::EmptyArrayOperation, status);
ASSERT_STRING_CONTAINS(status.reason(), "no operations provided to multiApply");
}
@@ -495,12 +496,13 @@ TEST_F(SyncTailTest, MultiApplyReturnsBadValueOnNullApplyOperation) {
auto writerPool = SyncTail::makeWriterPool();
MultiApplier::ApplyOperationFn nullApplyOperationFn;
auto op = makeCreateCollectionOplogEntry({Timestamp(Seconds(1), 0), 1LL});
- auto status = multiApply(_txn.get(), writerPool.get(), {op}, nullApplyOperationFn).getStatus();
+ auto status =
+ multiApply(_opCtx.get(), writerPool.get(), {op}, nullApplyOperationFn).getStatus();
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "invalid apply operation function");
}
-bool _testOplogEntryIsForCappedCollection(OperationContext* txn,
+bool _testOplogEntryIsForCappedCollection(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionOptions& options) {
auto writerPool = SyncTail::makeWriterPool();
@@ -512,13 +514,13 @@ bool _testOplogEntryIsForCappedCollection(OperationContext* txn,
}
return Status::OK();
};
- createCollection(txn, nss, options);
+ createCollection(opCtx, nss, options);
auto op = makeInsertDocumentOplogEntry({Timestamp(Seconds(1), 0), 1LL}, nss, BSON("a" << 1));
ASSERT_FALSE(op.isForCappedCollection);
auto lastOpTime =
- unittest::assertGet(multiApply(txn, writerPool.get(), {op}, applyOperationFn));
+ unittest::assertGet(multiApply(opCtx, writerPool.get(), {op}, applyOperationFn));
ASSERT_EQUALS(op.getOpTime(), lastOpTime);
ASSERT_EQUALS(1U, operationsApplied.size());
@@ -532,14 +534,14 @@ TEST_F(
SyncTailTest,
MultiApplyDoesNotSetOplogEntryIsForCappedCollectionWhenProcessingNonCappedCollectionInsertOperation) {
NamespaceString nss("local." + _agent.getSuiteName() + "_" + _agent.getTestName());
- ASSERT_FALSE(_testOplogEntryIsForCappedCollection(_txn.get(), nss, CollectionOptions()));
+ ASSERT_FALSE(_testOplogEntryIsForCappedCollection(_opCtx.get(), nss, CollectionOptions()));
}
TEST_F(SyncTailTest,
MultiApplySetsOplogEntryIsForCappedCollectionWhenProcessingCappedCollectionInsertOperation) {
NamespaceString nss("local." + _agent.getSuiteName() + "_" + _agent.getTestName());
ASSERT_TRUE(
- _testOplogEntryIsForCappedCollection(_txn.get(), nss, createOplogCollectionOptions()));
+ _testOplogEntryIsForCappedCollection(_opCtx.get(), nss, createOplogCollectionOptions()));
}
TEST_F(SyncTailTest, MultiApplyAssignsOperationsToWriterThreadsBasedOnNamespaceHash) {
@@ -569,7 +571,7 @@ TEST_F(SyncTailTest, MultiApplyAssignsOperationsToWriterThreadsBasedOnNamespaceH
NamespaceString nssForInsert;
std::vector<BSONObj> operationsWrittenToOplog;
_storageInterface->insertDocumentsFn = [&mutex, &nssForInsert, &operationsWrittenToOplog](
- OperationContext* txn, const NamespaceString& nss, const std::vector<BSONObj>& docs) {
+ OperationContext* opCtx, const NamespaceString& nss, const std::vector<BSONObj>& docs) {
stdx::lock_guard<stdx::mutex> lock(mutex);
nssForInsert = nss;
operationsWrittenToOplog = docs;
@@ -577,7 +579,7 @@ TEST_F(SyncTailTest, MultiApplyAssignsOperationsToWriterThreadsBasedOnNamespaceH
};
auto lastOpTime =
- unittest::assertGet(multiApply(_txn.get(), &writerPool, {op1, op2}, applyOperationFn));
+ unittest::assertGet(multiApply(_opCtx.get(), &writerPool, {op1, op2}, applyOperationFn));
ASSERT_EQUALS(op2.getOpTime(), lastOpTime);
// Each writer thread should be given exactly one operation to apply.
@@ -606,28 +608,28 @@ TEST_F(SyncTailTest, MultiApplyAssignsOperationsToWriterThreadsBasedOnNamespaceH
TEST_F(SyncTailTest, MultiSyncApplyUsesSyncApplyToApplyOperation) {
NamespaceString nss("local." + _agent.getSuiteName() + "_" + _agent.getTestName());
auto op = makeCreateCollectionOplogEntry({Timestamp(Seconds(1), 0), 1LL}, nss);
- _txn.reset();
+ _opCtx.reset();
MultiApplier::OperationPtrs ops = {&op};
multiSyncApply(&ops, nullptr);
// Collection should be created after SyncTail::syncApply() processes operation.
- _txn = cc().makeOperationContext();
- ASSERT_TRUE(AutoGetCollectionForRead(_txn.get(), nss).getCollection());
+ _opCtx = cc().makeOperationContext();
+ ASSERT_TRUE(AutoGetCollectionForRead(_opCtx.get(), nss).getCollection());
}
TEST_F(SyncTailTest, MultiSyncApplyDisablesDocumentValidationWhileApplyingOperations) {
NamespaceString nss("local." + _agent.getSuiteName() + "_" + _agent.getTestName());
- auto syncApply = [](OperationContext* txn, const BSONObj&, bool convertUpdatesToUpserts) {
- ASSERT_FALSE(txn->writesAreReplicated());
- ASSERT_FALSE(txn->lockState()->shouldConflictWithSecondaryBatchApplication());
- ASSERT_TRUE(documentValidationDisabled(txn));
+ auto syncApply = [](OperationContext* opCtx, const BSONObj&, bool convertUpdatesToUpserts) {
+ ASSERT_FALSE(opCtx->writesAreReplicated());
+ ASSERT_FALSE(opCtx->lockState()->shouldConflictWithSecondaryBatchApplication());
+ ASSERT_TRUE(documentValidationDisabled(opCtx));
ASSERT_TRUE(convertUpdatesToUpserts);
return Status::OK();
};
auto op = makeUpdateDocumentOplogEntry(
{Timestamp(Seconds(1), 0), 1LL}, nss, BSON("_id" << 0), BSON("_id" << 0 << "x" << 2));
MultiApplier::OperationPtrs ops = {&op};
- ASSERT_OK(multiSyncApply_noAbort(_txn.get(), &ops, syncApply));
+ ASSERT_OK(multiSyncApply_noAbort(_opCtx.get(), &ops, syncApply));
}
TEST_F(SyncTailTest, MultiSyncApplyPassesThroughSyncApplyErrorAfterFailingToApplyOperation) {
@@ -640,7 +642,8 @@ TEST_F(SyncTailTest, MultiSyncApplyPassesThroughSyncApplyErrorAfterFailingToAppl
return {ErrorCodes::OperationFailed, ""};
};
MultiApplier::OperationPtrs ops = {&op};
- ASSERT_EQUALS(ErrorCodes::OperationFailed, multiSyncApply_noAbort(_txn.get(), &ops, syncApply));
+ ASSERT_EQUALS(ErrorCodes::OperationFailed,
+ multiSyncApply_noAbort(_opCtx.get(), &ops, syncApply));
}
TEST_F(SyncTailTest, MultiSyncApplyPassesThroughSyncApplyException) {
@@ -654,7 +657,8 @@ TEST_F(SyncTailTest, MultiSyncApplyPassesThroughSyncApplyException) {
MONGO_UNREACHABLE;
};
MultiApplier::OperationPtrs ops = {&op};
- ASSERT_EQUALS(ErrorCodes::OperationFailed, multiSyncApply_noAbort(_txn.get(), &ops, syncApply));
+ ASSERT_EQUALS(ErrorCodes::OperationFailed,
+ multiSyncApply_noAbort(_opCtx.get(), &ops, syncApply));
}
TEST_F(SyncTailTest, MultiSyncApplySortsOperationsStablyByNamespaceBeforeApplying) {
@@ -677,7 +681,7 @@ TEST_F(SyncTailTest, MultiSyncApplySortsOperationsStablyByNamespaceBeforeApplyin
return Status::OK();
};
MultiApplier::OperationPtrs ops = {&op4, &op1, &op3, &op2};
- ASSERT_OK(multiSyncApply_noAbort(_txn.get(), &ops, syncApply));
+ ASSERT_OK(multiSyncApply_noAbort(_opCtx.get(), &ops, syncApply));
ASSERT_EQUALS(4U, operationsApplied.size());
ASSERT_EQUALS(op1, operationsApplied[0]);
ASSERT_EQUALS(op2, operationsApplied[1]);
@@ -707,7 +711,7 @@ TEST_F(SyncTailTest, MultiSyncApplyGroupsInsertOperationByNamespaceBeforeApplyin
MultiApplier::OperationPtrs ops = {
&createOp1, &createOp2, &insertOp1a, &insertOp2a, &insertOp1b, &insertOp2b};
- ASSERT_OK(multiSyncApply_noAbort(_txn.get(), &ops, syncApply));
+ ASSERT_OK(multiSyncApply_noAbort(_opCtx.get(), &ops, syncApply));
ASSERT_EQUALS(4U, operationsApplied.size());
ASSERT_EQUALS(createOp1, operationsApplied[0]);
@@ -761,7 +765,7 @@ TEST_F(SyncTailTest, MultiSyncApplyUsesLimitWhenGroupingInsertOperation) {
for (auto&& op : operationsToApply) {
ops.push_back(&op);
}
- ASSERT_OK(multiSyncApply_noAbort(_txn.get(), &ops, syncApply));
+ ASSERT_OK(multiSyncApply_noAbort(_opCtx.get(), &ops, syncApply));
// multiSyncApply should combine operations as follows:
// {create}, {grouped_insert}, {insert_(limit+1)}
@@ -820,7 +824,7 @@ TEST_F(SyncTailTest, MultiSyncApplyFallsBackOnApplyingInsertsIndividuallyWhenGro
for (auto&& op : operationsToApply) {
ops.push_back(&op);
}
- ASSERT_OK(multiSyncApply_noAbort(_txn.get(), &ops, syncApply));
+ ASSERT_OK(multiSyncApply_noAbort(_opCtx.get(), &ops, syncApply));
// On failing to apply the grouped insert operation, multiSyncApply should apply the operations
// as given in "operationsToApply":
@@ -845,7 +849,7 @@ TEST_F(SyncTailTest, MultiInitialSyncApplyDisablesDocumentValidationWhileApplyin
{Timestamp(Seconds(1), 0), 1LL}, nss, BSON("_id" << 0), BSON("_id" << 0 << "x" << 2));
MultiApplier::OperationPtrs ops = {&op};
AtomicUInt32 fetchCount(0);
- ASSERT_OK(multiInitialSyncApply_noAbort(_txn.get(), &ops, &syncTail, &fetchCount));
+ ASSERT_OK(multiInitialSyncApply_noAbort(_opCtx.get(), &ops, &syncTail, &fetchCount));
ASSERT_EQUALS(fetchCount.load(), 1U);
}
@@ -858,11 +862,11 @@ TEST_F(SyncTailTest,
{Timestamp(Seconds(1), 0), 1LL}, nss, BSON("_id" << 0), BSON("_id" << 0 << "x" << 2));
MultiApplier::OperationPtrs ops = {&op};
AtomicUInt32 fetchCount(0);
- ASSERT_OK(multiInitialSyncApply_noAbort(_txn.get(), &ops, &syncTail, &fetchCount));
+ ASSERT_OK(multiInitialSyncApply_noAbort(_opCtx.get(), &ops, &syncTail, &fetchCount));
// Since the missing document is not found on the sync source, the collection referenced by
// the failed operation should not be automatically created.
- ASSERT_FALSE(AutoGetCollectionForRead(_txn.get(), nss).getCollection());
+ ASSERT_FALSE(AutoGetCollectionForRead(_opCtx.get(), nss).getCollection());
ASSERT_EQUALS(fetchCount.load(), 1U);
}
@@ -880,10 +884,10 @@ TEST_F(SyncTailTest, MultiInitialSyncApplySkipsDocumentOnNamespaceNotFound) {
auto op3 = makeInsertDocumentOplogEntry({Timestamp(Seconds(4), 0), 1LL}, nss, doc3);
MultiApplier::OperationPtrs ops = {&op0, &op1, &op2, &op3};
AtomicUInt32 fetchCount(0);
- ASSERT_OK(multiInitialSyncApply_noAbort(_txn.get(), &ops, &syncTail, &fetchCount));
+ ASSERT_OK(multiInitialSyncApply_noAbort(_opCtx.get(), &ops, &syncTail, &fetchCount));
ASSERT_EQUALS(fetchCount.load(), 0U);
- OplogInterfaceLocal collectionReader(_txn.get(), nss.ns());
+ OplogInterfaceLocal collectionReader(_opCtx.get(), nss.ns());
auto iter = collectionReader.makeIterator();
ASSERT_BSONOBJ_EQ(doc3, unittest::assertGet(iter->next()).first);
ASSERT_BSONOBJ_EQ(doc1, unittest::assertGet(iter->next()).first);
@@ -898,13 +902,13 @@ TEST_F(SyncTailTest, MultiInitialSyncApplyRetriesFailedUpdateIfDocumentIsAvailab
{Timestamp(Seconds(1), 0), 1LL}, nss, BSON("_id" << 0), updatedDocument);
MultiApplier::OperationPtrs ops = {&op};
AtomicUInt32 fetchCount(0);
- ASSERT_OK(multiInitialSyncApply_noAbort(_txn.get(), &ops, &syncTail, &fetchCount));
+ ASSERT_OK(multiInitialSyncApply_noAbort(_opCtx.get(), &ops, &syncTail, &fetchCount));
ASSERT_EQUALS(fetchCount.load(), 1U);
// The collection referenced by "ns" in the failed operation is automatically created to hold
// the missing document fetched from the sync source. We verify the contents of the collection
// with the OplogInterfaceLocal class.
- OplogInterfaceLocal collectionReader(_txn.get(), nss.ns());
+ OplogInterfaceLocal collectionReader(_opCtx.get(), nss.ns());
auto iter = collectionReader.makeIterator();
ASSERT_BSONOBJ_EQ(updatedDocument, unittest::assertGet(iter->next()).first);
ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, iter->next().getStatus());
@@ -920,7 +924,7 @@ TEST_F(SyncTailTest, MultiInitialSyncApplyPassesThroughSyncApplyErrorAfterFailin
MultiApplier::OperationPtrs ops = {&op};
AtomicUInt32 fetchCount(0);
ASSERT_EQUALS(ErrorCodes::BadValue,
- multiInitialSyncApply_noAbort(_txn.get(), &ops, &syncTail, &fetchCount));
+ multiInitialSyncApply_noAbort(_opCtx.get(), &ops, &syncTail, &fetchCount));
ASSERT_EQUALS(fetchCount.load(), 1U);
}
@@ -929,12 +933,13 @@ TEST_F(SyncTailTest, MultiInitialSyncApplyPassesThroughShouldSyncTailRetryError)
NamespaceString nss("local." + _agent.getSuiteName() + "_" + _agent.getTestName());
auto op = makeUpdateDocumentOplogEntry(
{Timestamp(Seconds(1), 0), 1LL}, nss, BSON("_id" << 0), BSON("_id" << 0 << "x" << 2));
- ASSERT_THROWS_CODE(
- syncTail.shouldRetry(_txn.get(), op.raw), mongo::UserException, ErrorCodes::FailedToParse);
+ ASSERT_THROWS_CODE(syncTail.shouldRetry(_opCtx.get(), op.raw),
+ mongo::UserException,
+ ErrorCodes::FailedToParse);
MultiApplier::OperationPtrs ops = {&op};
AtomicUInt32 fetchCount(0);
ASSERT_EQUALS(ErrorCodes::FailedToParse,
- multiInitialSyncApply_noAbort(_txn.get(), &ops, &syncTail, &fetchCount));
+ multiInitialSyncApply_noAbort(_opCtx.get(), &ops, &syncTail, &fetchCount));
ASSERT_EQUALS(fetchCount.load(), 1U);
}
@@ -970,7 +975,7 @@ Status IdempotencyTest::runOps(std::initializer_list<OplogEntry> ops) {
opsPtrs.push_back(&op);
}
AtomicUInt32 fetchCount(0);
- return multiInitialSyncApply_noAbort(_txn.get(), &opsPtrs, &syncTail, &fetchCount);
+ return multiInitialSyncApply_noAbort(_opCtx.get(), &opsPtrs, &syncTail, &fetchCount);
}
OplogEntry IdempotencyTest::createCollection() {
@@ -1002,21 +1007,21 @@ OplogEntry IdempotencyTest::dropIndex(const std::string& indexName) {
}
std::string IdempotencyTest::validate() {
- auto collection = AutoGetCollectionForRead(_txn.get(), nss).getCollection();
+ auto collection = AutoGetCollectionForRead(_opCtx.get(), nss).getCollection();
if (!collection) {
return "CollectionNotFound";
}
ValidateResults validateResults;
BSONObjBuilder bob;
- Lock::DBLock lk(_txn->lockState(), nss.db(), MODE_IS);
- Lock::CollectionLock lock(_txn->lockState(), nss.ns(), MODE_IS);
- ASSERT_OK(collection->validate(_txn.get(), kValidateFull, &validateResults, &bob));
+ Lock::DBLock lk(_opCtx->lockState(), nss.db(), MODE_IS);
+ Lock::CollectionLock lock(_opCtx->lockState(), nss.ns(), MODE_IS);
+ ASSERT_OK(collection->validate(_opCtx.get(), kValidateFull, &validateResults, &bob));
ASSERT_TRUE(validateResults.valid);
- IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex(_txn.get());
+ IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex(_opCtx.get());
ASSERT_TRUE(desc);
- auto exec = InternalPlanner::indexScan(_txn.get(),
+ auto exec = InternalPlanner::indexScan(_opCtx.get(),
collection,
desc,
BSONObj(),
@@ -1041,7 +1046,7 @@ std::string IdempotencyTest::validate() {
}
TEST_F(IdempotencyTest, Geo2dsphereIndexFailedOnUpdate) {
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING);
ASSERT_OK(runOp(createCollection()));
auto insertOp = insert(fromjson("{_id: 1, loc: 'hi'}"));
auto updateOp = update(1, fromjson("{$set: {loc: [1, 2]}}"));
@@ -1054,13 +1059,13 @@ TEST_F(IdempotencyTest, Geo2dsphereIndexFailedOnUpdate) {
ASSERT_OK(runOps(ops));
ASSERT_EQUALS(hash, validate());
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_PRIMARY);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY);
auto status = runOps(ops);
ASSERT_EQ(status.code(), 16755);
}
TEST_F(IdempotencyTest, Geo2dsphereIndexFailedOnIndexing) {
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING);
ASSERT_OK(runOp(createCollection()));
auto indexOp = buildIndex(fromjson("{loc: '2dsphere'}"), BSON("2dsphereIndexVersion" << 3));
auto dropIndexOp = dropIndex("loc_index");
@@ -1073,13 +1078,13 @@ TEST_F(IdempotencyTest, Geo2dsphereIndexFailedOnIndexing) {
ASSERT_OK(runOps(ops));
ASSERT_EQUALS(hash, validate());
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_PRIMARY);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY);
auto status = runOps(ops);
ASSERT_EQ(status.code(), 16755);
}
TEST_F(IdempotencyTest, Geo2dIndex) {
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING);
ASSERT_OK(runOp(createCollection()));
auto insertOp = insert(fromjson("{_id: 1, loc: [1]}"));
auto updateOp = update(1, fromjson("{$set: {loc: [1, 2]}}"));
@@ -1092,13 +1097,13 @@ TEST_F(IdempotencyTest, Geo2dIndex) {
ASSERT_OK(runOps(ops));
ASSERT_EQUALS(hash, validate());
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_PRIMARY);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY);
auto status = runOps(ops);
ASSERT_EQ(status.code(), 13068);
}
TEST_F(IdempotencyTest, UniqueKeyIndex) {
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING);
ASSERT_OK(runOp(createCollection()));
auto insertOp = insert(fromjson("{_id: 1, x: 5}"));
auto updateOp = update(1, fromjson("{$set: {x: 6}}"));
@@ -1112,13 +1117,13 @@ TEST_F(IdempotencyTest, UniqueKeyIndex) {
ASSERT_OK(runOps(ops));
ASSERT_EQUALS(hash, validate());
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_PRIMARY);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY);
auto status = runOps(ops);
ASSERT_EQ(status.code(), ErrorCodes::DuplicateKey);
}
TEST_F(IdempotencyTest, ParallelArrayError) {
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING);
ASSERT_OK(runOp(createCollection()));
ASSERT_OK(runOp(insert(fromjson("{_id: 1}"))));
@@ -1135,13 +1140,13 @@ TEST_F(IdempotencyTest, ParallelArrayError) {
ASSERT_OK(runOps(ops));
ASSERT_EQUALS(hash, validate());
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_PRIMARY);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY);
auto status = runOps(ops);
ASSERT_EQ(status.code(), ErrorCodes::CannotIndexParallelArrays);
}
TEST_F(IdempotencyTest, IndexKeyTooLongError) {
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING);
ASSERT_OK(runOp(createCollection()));
ASSERT_OK(runOp(insert(fromjson("{_id: 1}"))));
@@ -1161,13 +1166,13 @@ TEST_F(IdempotencyTest, IndexKeyTooLongError) {
ASSERT_OK(runOps(ops));
ASSERT_EQUALS(hash, validate());
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_PRIMARY);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY);
auto status = runOps(ops);
ASSERT_EQ(status.code(), ErrorCodes::KeyTooLong);
}
TEST_F(IdempotencyTest, IndexWithDifferentOptions) {
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING);
ASSERT_OK(runOp(createCollection()));
ASSERT_OK(runOp(insert(fromjson("{_id: 1, x: 'hi'}"))));
@@ -1183,13 +1188,13 @@ TEST_F(IdempotencyTest, IndexWithDifferentOptions) {
ASSERT_OK(runOps(ops));
ASSERT_EQUALS(hash, validate());
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_PRIMARY);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY);
auto status = runOps(ops);
ASSERT_EQ(status.code(), ErrorCodes::IndexOptionsConflict);
}
TEST_F(IdempotencyTest, TextIndexDocumentHasNonStringLanguageField) {
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING);
ASSERT_OK(runOp(createCollection()));
auto insertOp = insert(fromjson("{_id: 1, x: 'words to index', language: 1}"));
@@ -1203,13 +1208,13 @@ TEST_F(IdempotencyTest, TextIndexDocumentHasNonStringLanguageField) {
ASSERT_OK(runOps(ops));
ASSERT_EQUALS(hash, validate());
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_PRIMARY);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY);
auto status = runOps(ops);
ASSERT_EQ(status.code(), 17261);
}
TEST_F(IdempotencyTest, InsertDocumentWithNonStringLanguageFieldWhenTextIndexExists) {
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING);
ASSERT_OK(runOp(createCollection()));
auto indexOp = buildIndex(fromjson("{x: 'text'}"), BSONObj());
@@ -1223,13 +1228,13 @@ TEST_F(IdempotencyTest, InsertDocumentWithNonStringLanguageFieldWhenTextIndexExi
ASSERT_OK(runOps(ops));
ASSERT_EQUALS(hash, validate());
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_PRIMARY);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY);
auto status = runOps(ops);
ASSERT_EQ(status.code(), 17261);
}
TEST_F(IdempotencyTest, TextIndexDocumentHasNonStringLanguageOverrideField) {
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING);
ASSERT_OK(runOp(createCollection()));
auto insertOp = insert(fromjson("{_id: 1, x: 'words to index', y: 1}"));
@@ -1243,13 +1248,13 @@ TEST_F(IdempotencyTest, TextIndexDocumentHasNonStringLanguageOverrideField) {
ASSERT_OK(runOps(ops));
ASSERT_EQUALS(hash, validate());
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_PRIMARY);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY);
auto status = runOps(ops);
ASSERT_EQ(status.code(), 17261);
}
TEST_F(IdempotencyTest, InsertDocumentWithNonStringLanguageOverrideFieldWhenTextIndexExists) {
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING);
ASSERT_OK(runOp(createCollection()));
auto indexOp = buildIndex(fromjson("{x: 'text'}"), fromjson("{language_override: 'y'}"));
@@ -1263,13 +1268,13 @@ TEST_F(IdempotencyTest, InsertDocumentWithNonStringLanguageOverrideFieldWhenText
ASSERT_OK(runOps(ops));
ASSERT_EQUALS(hash, validate());
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_PRIMARY);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY);
auto status = runOps(ops);
ASSERT_EQ(status.code(), 17261);
}
TEST_F(IdempotencyTest, TextIndexDocumentHasUnknownLanguage) {
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING);
ASSERT_OK(runOp(createCollection()));
auto insertOp = insert(fromjson("{_id: 1, x: 'words to index', language: 'bad'}"));
@@ -1283,7 +1288,7 @@ TEST_F(IdempotencyTest, TextIndexDocumentHasUnknownLanguage) {
ASSERT_OK(runOps(ops));
ASSERT_EQUALS(hash, validate());
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_PRIMARY);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_PRIMARY);
auto status = runOps(ops);
ASSERT_EQ(status.code(), 17262);
}
@@ -1428,7 +1433,7 @@ TEST_F(IdempotencyTest, CollModIndexNotFound) {
}
TEST_F(IdempotencyTest, ResyncOnRenameCollection) {
- ReplicationCoordinator::get(_txn.get())->setFollowerMode(MemberState::RS_RECOVERING);
+ ReplicationCoordinator::get(_opCtx.get())->setFollowerMode(MemberState::RS_RECOVERING);
auto cmd = BSON("renameCollection" << nss.ns() << "to"
<< "test.bar"
diff --git a/src/mongo/db/repl/task_runner.cpp b/src/mongo/db/repl/task_runner.cpp
index 210718bba3e..d364151a228 100644
--- a/src/mongo/db/repl/task_runner.cpp
+++ b/src/mongo/db/repl/task_runner.cpp
@@ -60,10 +60,10 @@ using LockGuard = stdx::lock_guard<stdx::mutex>;
* next action of kCancel.
*/
TaskRunner::NextAction runSingleTask(const TaskRunner::Task& task,
- OperationContext* txn,
+ OperationContext* opCtx,
const Status& status) {
try {
- return task(txn, status);
+ return task(opCtx, status);
} catch (...) {
log() << "Unhandled exception in task runner: " << redact(exceptionToStatus());
}
@@ -74,7 +74,7 @@ TaskRunner::NextAction runSingleTask(const TaskRunner::Task& task,
// static
TaskRunner::Task TaskRunner::makeCancelTask() {
- return [](OperationContext* txn, const Status& status) { return NextAction::kCancel; };
+ return [](OperationContext* opCtx, const Status& status) { return NextAction::kCancel; };
}
TaskRunner::TaskRunner(OldThreadPool* threadPool)
@@ -132,10 +132,10 @@ void TaskRunner::join() {
void TaskRunner::_runTasks() {
Client* client = nullptr;
- ServiceContext::UniqueOperationContext txn;
+ ServiceContext::UniqueOperationContext opCtx;
while (Task task = _waitForNextTask()) {
- if (!txn) {
+ if (!opCtx) {
if (!client) {
// We initialize cc() because ServiceContextMongoD::_newOpCtx() expects cc()
// to be equal to the client used to create the operation context.
@@ -145,13 +145,13 @@ void TaskRunner::_runTasks() {
AuthorizationSession::get(client)->grantInternalAuthorization();
}
}
- txn = client->makeOperationContext();
+ opCtx = client->makeOperationContext();
}
- NextAction nextAction = runSingleTask(task, txn.get(), Status::OK());
+ NextAction nextAction = runSingleTask(task, opCtx.get(), Status::OK());
if (nextAction != NextAction::kKeepOperationContext) {
- txn.reset();
+ opCtx.reset();
}
if (nextAction == NextAction::kCancel) {
@@ -167,7 +167,7 @@ void TaskRunner::_runTasks() {
}
}
}
- txn.reset();
+ opCtx.reset();
std::list<Task> tasks;
UniqueLock lk{_mutex};
@@ -221,13 +221,13 @@ Status TaskRunner::runSynchronousTask(SynchronousTask func, TaskRunner::NextActi
stdx::condition_variable waitTillDoneCond;
Status returnStatus{Status::OK()};
- this->schedule([&](OperationContext* txn, const Status taskStatus) {
+ this->schedule([&](OperationContext* opCtx, const Status taskStatus) {
if (!taskStatus.isOK()) {
returnStatus = taskStatus;
} else {
// Run supplied function.
try {
- returnStatus = func(txn);
+ returnStatus = func(opCtx);
} catch (...) {
returnStatus = exceptionToStatus();
error() << "Exception thrown in runSynchronousTask: " << redact(returnStatus);
diff --git a/src/mongo/db/repl/task_runner.h b/src/mongo/db/repl/task_runner.h
index a8908660c44..b7dcf4c05d6 100644
--- a/src/mongo/db/repl/task_runner.h
+++ b/src/mongo/db/repl/task_runner.h
@@ -59,7 +59,7 @@ public:
};
using Task = stdx::function<NextAction(OperationContext*, const Status&)>;
- using SynchronousTask = stdx::function<Status(OperationContext* txn)>;
+ using SynchronousTask = stdx::function<Status(OperationContext* opCtx)>;
/**
* Returns the Status from the supplied function after running it..
diff --git a/src/mongo/db/repl/task_runner_test.cpp b/src/mongo/db/repl/task_runner_test.cpp
index dedb6269083..62b64513b37 100644
--- a/src/mongo/db/repl/task_runner_test.cpp
+++ b/src/mongo/db/repl/task_runner_test.cpp
@@ -58,12 +58,12 @@ TEST_F(TaskRunnerTest, GetDiagnosticString) {
TEST_F(TaskRunnerTest, CallbackValues) {
stdx::mutex mutex;
bool called = false;
- OperationContext* txn = nullptr;
+ OperationContext* opCtx = nullptr;
Status status = getDetectableErrorStatus();
auto task = [&](OperationContext* theTxn, const Status& theStatus) {
stdx::lock_guard<stdx::mutex> lk(mutex);
called = true;
- txn = theTxn;
+ opCtx = theTxn;
status = theStatus;
return TaskRunner::NextAction::kCancel;
};
@@ -73,7 +73,7 @@ TEST_F(TaskRunnerTest, CallbackValues) {
stdx::lock_guard<stdx::mutex> lk(mutex);
ASSERT_TRUE(called);
- ASSERT(txn);
+ ASSERT(opCtx);
ASSERT_OK(status);
}
@@ -149,7 +149,7 @@ TEST_F(TaskRunnerTest, RunTaskTwiceKeepOperationContext) {
TEST_F(TaskRunnerTest, SkipSecondTask) {
stdx::mutex mutex;
int i = 0;
- OperationContext* txn[2] = {nullptr, nullptr};
+ OperationContext* opCtx[2] = {nullptr, nullptr};
Status status[2] = {getDetectableErrorStatus(), getDetectableErrorStatus()};
stdx::condition_variable condition;
bool schedulingDone = false;
@@ -159,7 +159,7 @@ TEST_F(TaskRunnerTest, SkipSecondTask) {
if (j >= 2) {
return TaskRunner::NextAction::kCancel;
}
- txn[j] = theTxn;
+ opCtx[j] = theTxn;
status[j] = theStatus;
// Wait for the test code to schedule the second task.
@@ -182,16 +182,16 @@ TEST_F(TaskRunnerTest, SkipSecondTask) {
stdx::lock_guard<stdx::mutex> lk(mutex);
ASSERT_EQUALS(2, i);
- ASSERT(txn[0]);
+ ASSERT(opCtx[0]);
ASSERT_OK(status[0]);
- ASSERT_FALSE(txn[1]);
+ ASSERT_FALSE(opCtx[1]);
ASSERT_EQUALS(ErrorCodes::CallbackCanceled, status[1].code());
}
TEST_F(TaskRunnerTest, FirstTaskThrowsException) {
stdx::mutex mutex;
int i = 0;
- OperationContext* txn[2] = {nullptr, nullptr};
+ OperationContext* opCtx[2] = {nullptr, nullptr};
Status status[2] = {getDetectableErrorStatus(), getDetectableErrorStatus()};
stdx::condition_variable condition;
bool schedulingDone = false;
@@ -201,7 +201,7 @@ TEST_F(TaskRunnerTest, FirstTaskThrowsException) {
if (j >= 2) {
return TaskRunner::NextAction::kCancel;
}
- txn[j] = theTxn;
+ opCtx[j] = theTxn;
status[j] = theStatus;
// Wait for the test code to schedule the second task.
@@ -231,9 +231,9 @@ TEST_F(TaskRunnerTest, FirstTaskThrowsException) {
stdx::lock_guard<stdx::mutex> lk(mutex);
ASSERT_EQUALS(2, i);
- ASSERT(txn[0]);
+ ASSERT(opCtx[0]);
ASSERT_OK(status[0]);
- ASSERT_FALSE(txn[1]);
+ ASSERT_FALSE(opCtx[1]);
ASSERT_EQUALS(ErrorCodes::CallbackCanceled, status[1].code());
}
diff --git a/src/mongo/db/restapi.cpp b/src/mongo/db/restapi.cpp
index caa8cafa8ec..532f0c81213 100644
--- a/src/mongo/db/restapi.cpp
+++ b/src/mongo/db/restapi.cpp
@@ -70,7 +70,7 @@ public:
return url[0] == '/' && url.find_last_of('/') > 0;
}
- virtual void handle(OperationContext* txn,
+ virtual void handle(OperationContext* opCtx,
const char* rq,
const std::string& url,
BSONObj params,
@@ -78,7 +78,7 @@ public:
int& responseCode,
vector<string>& headers,
const SockAddr& from) {
- DBDirectClient db(txn);
+ DBDirectClient db(opCtx);
string::size_type first = url.find("/", 1);
if (first == string::npos) {
@@ -115,10 +115,10 @@ public:
if (method == "GET") {
responseCode = 200;
- html = handleRESTQuery(txn, fullns, action, params, responseCode, ss);
+ html = handleRESTQuery(opCtx, fullns, action, params, responseCode, ss);
} else if (method == "POST") {
responseCode = 201;
- handlePost(txn, fullns, MiniWebServer::body(rq), params, responseCode, ss);
+ handlePost(opCtx, fullns, MiniWebServer::body(rq), params, responseCode, ss);
} else {
responseCode = 400;
headers.push_back("X_err: bad request");
@@ -134,7 +134,7 @@ public:
responseMsg = ss.str();
}
- bool handleRESTQuery(OperationContext* txn,
+ bool handleRESTQuery(OperationContext* opCtx,
const std::string& ns,
const std::string& action,
BSONObj& params,
@@ -177,7 +177,7 @@ public:
BSONObj query = queryBuilder.obj();
- DBDirectClient db(txn);
+ DBDirectClient db(opCtx);
unique_ptr<DBClientCursor> cursor = db.query(ns.c_str(), query, num, skip);
uassert(13085, "query failed for dbwebserver", cursor.get());
@@ -235,7 +235,7 @@ public:
}
// TODO Generate id and revision per couch POST spec
- void handlePost(OperationContext* txn,
+ void handlePost(OperationContext* opCtx,
const std::string& ns,
const char* body,
BSONObj& params,
@@ -244,7 +244,7 @@ public:
try {
BSONObj obj = fromjson(body);
- DBDirectClient db(txn);
+ DBDirectClient db(opCtx);
db.insert(ns.c_str(), obj);
} catch (...) {
responseCode = 400; // Bad Request. Seems reasonable for now.
@@ -265,9 +265,9 @@ public:
}
} restHandler;
-bool RestAdminAccess::haveAdminUsers(OperationContext* txn) const {
- AuthorizationSession* authzSession = AuthorizationSession::get(txn->getClient());
- return authzSession->getAuthorizationManager().hasAnyPrivilegeDocuments(txn);
+bool RestAdminAccess::haveAdminUsers(OperationContext* opCtx) const {
+ AuthorizationSession* authzSession = AuthorizationSession::get(opCtx->getClient());
+ return authzSession->getAuthorizationManager().hasAnyPrivilegeDocuments(opCtx);
}
class LowLevelMongodStatus : public WebStatusPlugin {
@@ -304,9 +304,9 @@ public:
ss << "</pre>\n";
}
- virtual void run(OperationContext* txn, stringstream& ss) {
+ virtual void run(OperationContext* opCtx, stringstream& ss) {
Timer t;
- Lock::GlobalLock globalSLock(txn->lockState(), MODE_S, 300);
+ Lock::GlobalLock globalSLock(opCtx->lockState(), MODE_S, 300);
if (globalSLock.isLocked()) {
_gotLock(t.millis(), ss);
} else {
diff --git a/src/mongo/db/restapi.h b/src/mongo/db/restapi.h
index 50a603a88dd..39f5e0a93aa 100644
--- a/src/mongo/db/restapi.h
+++ b/src/mongo/db/restapi.h
@@ -43,7 +43,7 @@ class RestAdminAccess : public AdminAccess {
public:
virtual ~RestAdminAccess() {}
- virtual bool haveAdminUsers(OperationContext* txn) const;
+ virtual bool haveAdminUsers(OperationContext* opCtx) const;
};
} // namespace mongo
diff --git a/src/mongo/db/run_commands.cpp b/src/mongo/db/run_commands.cpp
index 41adbdfb507..ec9372c87a3 100644
--- a/src/mongo/db/run_commands.cpp
+++ b/src/mongo/db/run_commands.cpp
@@ -40,7 +40,7 @@
namespace mongo {
-void runCommands(OperationContext* txn,
+void runCommands(OperationContext* opCtx,
const rpc::RequestInterface& request,
rpc::ReplyBuilderInterface* replyBuilder) {
try {
@@ -66,15 +66,15 @@ void runCommands(OperationContext* txn,
{
// Try to set this as early as possible, as soon as we have figured out the command.
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setLogicalOp_inlock(c->getLogicalOp());
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->setLogicalOp_inlock(c->getLogicalOp());
}
- Command::execCommand(txn, c, request, replyBuilder);
+ Command::execCommand(opCtx, c, request, replyBuilder);
}
catch (const DBException& ex) {
- Command::generateErrorResponse(txn, replyBuilder, ex, request);
+ Command::generateErrorResponse(opCtx, replyBuilder, ex, request);
}
}
diff --git a/src/mongo/db/run_commands.h b/src/mongo/db/run_commands.h
index c0c48e0dfa0..6d443c5b6cc 100644
--- a/src/mongo/db/run_commands.h
+++ b/src/mongo/db/run_commands.h
@@ -36,7 +36,7 @@ class ReplyBuilderInterface;
class RequestInterface;
} // namespace rpc
-void runCommands(OperationContext* txn,
+void runCommands(OperationContext* opCtx,
const rpc::RequestInterface& request,
rpc::ReplyBuilderInterface* replyBuilder);
diff --git a/src/mongo/db/s/active_migrations_registry.cpp b/src/mongo/db/s/active_migrations_registry.cpp
index 53f250ef9fd..91aa5dd7bf4 100644
--- a/src/mongo/db/s/active_migrations_registry.cpp
+++ b/src/mongo/db/s/active_migrations_registry.cpp
@@ -90,7 +90,7 @@ boost::optional<NamespaceString> ActiveMigrationsRegistry::getActiveDonateChunkN
return boost::none;
}
-BSONObj ActiveMigrationsRegistry::getActiveMigrationStatusReport(OperationContext* txn) {
+BSONObj ActiveMigrationsRegistry::getActiveMigrationStatusReport(OperationContext* opCtx) {
boost::optional<NamespaceString> nss;
{
stdx::lock_guard<stdx::mutex> lk(_mutex);
@@ -106,9 +106,9 @@ BSONObj ActiveMigrationsRegistry::getActiveMigrationStatusReport(OperationContex
// desireable for reporting, and then diagnosing, migrations that are stuck.
if (nss) {
// Lock the collection so nothing changes while we're getting the migration report.
- AutoGetCollection autoColl(txn, nss.get(), MODE_IS);
+ AutoGetCollection autoColl(opCtx, nss.get(), MODE_IS);
- auto css = CollectionShardingState::get(txn, nss.get());
+ auto css = CollectionShardingState::get(opCtx, nss.get());
if (css && css->getMigrationSourceManager()) {
return css->getMigrationSourceManager()->getMigrationStatusReport();
}
@@ -187,9 +187,9 @@ void ScopedRegisterDonateChunk::complete(Status status) {
_completionNotification->set(status);
}
-Status ScopedRegisterDonateChunk::waitForCompletion(OperationContext* txn) {
+Status ScopedRegisterDonateChunk::waitForCompletion(OperationContext* opCtx) {
invariant(!_forUnregister);
- return _completionNotification->get(txn);
+ return _completionNotification->get(opCtx);
}
ScopedRegisterReceiveChunk::ScopedRegisterReceiveChunk(ActiveMigrationsRegistry* registry)
diff --git a/src/mongo/db/s/active_migrations_registry.h b/src/mongo/db/s/active_migrations_registry.h
index 971b020626d..982cb2f8b26 100644
--- a/src/mongo/db/s/active_migrations_registry.h
+++ b/src/mongo/db/s/active_migrations_registry.h
@@ -92,7 +92,7 @@ public:
*
* Takes an IS lock on the namespace of the active migration, if one is active.
*/
- BSONObj getActiveMigrationStatusReport(OperationContext* txn);
+ BSONObj getActiveMigrationStatusReport(OperationContext* opCtx);
private:
friend class ScopedRegisterDonateChunk;
@@ -194,7 +194,7 @@ public:
* Must only be called if the object is in the 'join' mode. Blocks until the main executor of
* the moveChunk command calls complete.
*/
- Status waitForCompletion(OperationContext* txn);
+ Status waitForCompletion(OperationContext* opCtx);
private:
// Registry from which to unregister the migration. Not owned.
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index fd798ecd665..60a765cf5d6 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -186,12 +186,12 @@ Balancer* Balancer::get(OperationContext* operationContext) {
return get(operationContext->getServiceContext());
}
-void Balancer::initiateBalancer(OperationContext* txn) {
+void Balancer::initiateBalancer(OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
invariant(_state == kStopped);
_state = kRunning;
- _migrationManager.startRecoveryAndAcquireDistLocks(txn);
+ _migrationManager.startRecoveryAndAcquireDistLocks(opCtx);
invariant(!_thread.joinable());
invariant(!_threadOperationContext);
@@ -240,15 +240,15 @@ void Balancer::waitForBalancerToStop() {
LOG(1) << "Balancer thread terminated";
}
-void Balancer::joinCurrentRound(OperationContext* txn) {
+void Balancer::joinCurrentRound(OperationContext* opCtx) {
stdx::unique_lock<stdx::mutex> scopedLock(_mutex);
const auto numRoundsAtStart = _numBalancerRounds;
_condVar.wait(scopedLock,
[&] { return !_inBalancerRound || _numBalancerRounds != numRoundsAtStart; });
}
-Status Balancer::rebalanceSingleChunk(OperationContext* txn, const ChunkType& chunk) {
- auto migrateStatus = _chunkSelectionPolicy->selectSpecificChunkToMove(txn, chunk);
+Status Balancer::rebalanceSingleChunk(OperationContext* opCtx, const ChunkType& chunk) {
+ auto migrateStatus = _chunkSelectionPolicy->selectSpecificChunkToMove(opCtx, chunk);
if (!migrateStatus.isOK()) {
return migrateStatus.getStatus();
}
@@ -259,37 +259,37 @@ Status Balancer::rebalanceSingleChunk(OperationContext* txn, const ChunkType& ch
return Status::OK();
}
- auto balancerConfig = Grid::get(txn)->getBalancerConfiguration();
- Status refreshStatus = balancerConfig->refreshAndCheck(txn);
+ auto balancerConfig = Grid::get(opCtx)->getBalancerConfiguration();
+ Status refreshStatus = balancerConfig->refreshAndCheck(opCtx);
if (!refreshStatus.isOK()) {
return refreshStatus;
}
- return _migrationManager.executeManualMigration(txn,
+ return _migrationManager.executeManualMigration(opCtx,
*migrateInfo,
balancerConfig->getMaxChunkSizeBytes(),
balancerConfig->getSecondaryThrottle(),
balancerConfig->waitForDelete());
}
-Status Balancer::moveSingleChunk(OperationContext* txn,
+Status Balancer::moveSingleChunk(OperationContext* opCtx,
const ChunkType& chunk,
const ShardId& newShardId,
uint64_t maxChunkSizeBytes,
const MigrationSecondaryThrottleOptions& secondaryThrottle,
bool waitForDelete) {
- auto moveAllowedStatus = _chunkSelectionPolicy->checkMoveAllowed(txn, chunk, newShardId);
+ auto moveAllowedStatus = _chunkSelectionPolicy->checkMoveAllowed(opCtx, chunk, newShardId);
if (!moveAllowedStatus.isOK()) {
return moveAllowedStatus;
}
return _migrationManager.executeManualMigration(
- txn, MigrateInfo(newShardId, chunk), maxChunkSizeBytes, secondaryThrottle, waitForDelete);
+ opCtx, MigrateInfo(newShardId, chunk), maxChunkSizeBytes, secondaryThrottle, waitForDelete);
}
-void Balancer::report(OperationContext* txn, BSONObjBuilder* builder) {
- auto balancerConfig = Grid::get(txn)->getBalancerConfiguration();
- balancerConfig->refreshAndCheck(txn);
+void Balancer::report(OperationContext* opCtx, BSONObjBuilder* builder) {
+ auto balancerConfig = Grid::get(opCtx)->getBalancerConfiguration();
+ balancerConfig->refreshAndCheck(opCtx);
const auto mode = balancerConfig->getBalancerMode();
@@ -301,27 +301,27 @@ void Balancer::report(OperationContext* txn, BSONObjBuilder* builder) {
void Balancer::_mainThread() {
Client::initThread("Balancer");
- auto txn = cc().makeOperationContext();
- auto shardingContext = Grid::get(txn.get());
+ auto opCtx = cc().makeOperationContext();
+ auto shardingContext = Grid::get(opCtx.get());
log() << "CSRS balancer is starting";
{
stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
- _threadOperationContext = txn.get();
+ _threadOperationContext = opCtx.get();
}
const Seconds kInitBackoffInterval(10);
auto balancerConfig = shardingContext->getBalancerConfiguration();
while (!_stopRequested()) {
- Status refreshStatus = balancerConfig->refreshAndCheck(txn.get());
+ Status refreshStatus = balancerConfig->refreshAndCheck(opCtx.get());
if (!refreshStatus.isOK()) {
warning() << "Balancer settings could not be loaded and will be retried in "
<< durationCount<Seconds>(kInitBackoffInterval) << " seconds"
<< causedBy(refreshStatus);
- _sleepFor(txn.get(), kInitBackoffInterval);
+ _sleepFor(opCtx.get(), kInitBackoffInterval);
continue;
}
@@ -330,8 +330,9 @@ void Balancer::_mainThread() {
log() << "CSRS balancer thread is recovering";
- _migrationManager.finishRecovery(
- txn.get(), balancerConfig->getMaxChunkSizeBytes(), balancerConfig->getSecondaryThrottle());
+ _migrationManager.finishRecovery(opCtx.get(),
+ balancerConfig->getMaxChunkSizeBytes(),
+ balancerConfig->getSecondaryThrottle());
log() << "CSRS balancer thread is recovered";
@@ -339,23 +340,23 @@ void Balancer::_mainThread() {
while (!_stopRequested()) {
BalanceRoundDetails roundDetails;
- _beginRound(txn.get());
+ _beginRound(opCtx.get());
try {
- shardingContext->shardRegistry()->reload(txn.get());
+ shardingContext->shardRegistry()->reload(opCtx.get());
- uassert(13258, "oids broken after resetting!", _checkOIDs(txn.get()));
+ uassert(13258, "oids broken after resetting!", _checkOIDs(opCtx.get()));
- Status refreshStatus = balancerConfig->refreshAndCheck(txn.get());
+ Status refreshStatus = balancerConfig->refreshAndCheck(opCtx.get());
if (!refreshStatus.isOK()) {
warning() << "Skipping balancing round" << causedBy(refreshStatus);
- _endRound(txn.get(), kBalanceRoundDefaultInterval);
+ _endRound(opCtx.get(), kBalanceRoundDefaultInterval);
continue;
}
if (!balancerConfig->shouldBalance()) {
LOG(1) << "Skipping balancing round because balancing is disabled";
- _endRound(txn.get(), kBalanceRoundDefaultInterval);
+ _endRound(opCtx.get(), kBalanceRoundDefaultInterval);
continue;
}
@@ -366,9 +367,9 @@ void Balancer::_mainThread() {
<< balancerConfig->getSecondaryThrottle().toBSON();
OCCASIONALLY warnOnMultiVersion(
- uassertStatusOK(_clusterStats->getStats(txn.get())));
+ uassertStatusOK(_clusterStats->getStats(opCtx.get())));
- Status status = _enforceTagRanges(txn.get());
+ Status status = _enforceTagRanges(opCtx.get());
if (!status.isOK()) {
warning() << "Failed to enforce tag ranges" << causedBy(status);
} else {
@@ -376,25 +377,25 @@ void Balancer::_mainThread() {
}
const auto candidateChunks = uassertStatusOK(
- _chunkSelectionPolicy->selectChunksToMove(txn.get(), _balancedLastTime));
+ _chunkSelectionPolicy->selectChunksToMove(opCtx.get(), _balancedLastTime));
if (candidateChunks.empty()) {
LOG(1) << "no need to move any chunk";
_balancedLastTime = false;
} else {
- _balancedLastTime = _moveChunks(txn.get(), candidateChunks);
+ _balancedLastTime = _moveChunks(opCtx.get(), candidateChunks);
roundDetails.setSucceeded(static_cast<int>(candidateChunks.size()),
_balancedLastTime);
- shardingContext->catalogClient(txn.get())->logAction(
- txn.get(), "balancer.round", "", roundDetails.toBSON());
+ shardingContext->catalogClient(opCtx.get())
+ ->logAction(opCtx.get(), "balancer.round", "", roundDetails.toBSON());
}
LOG(1) << "*** End of balancing round";
}
- _endRound(txn.get(),
+ _endRound(opCtx.get(),
_balancedLastTime ? kShortBalanceRoundInterval
: kBalanceRoundDefaultInterval);
} catch (const std::exception& e) {
@@ -406,11 +407,11 @@ void Balancer::_mainThread() {
// This round failed, tell the world!
roundDetails.setFailed(e.what());
- shardingContext->catalogClient(txn.get())->logAction(
- txn.get(), "balancer.round", "", roundDetails.toBSON());
+ shardingContext->catalogClient(opCtx.get())
+ ->logAction(opCtx.get(), "balancer.round", "", roundDetails.toBSON());
// Sleep a fair amount before retrying because of the error
- _endRound(txn.get(), kBalanceRoundDefaultInterval);
+ _endRound(opCtx.get(), kBalanceRoundDefaultInterval);
}
}
@@ -437,13 +438,13 @@ bool Balancer::_stopRequested() {
return (_state != kRunning);
}
-void Balancer::_beginRound(OperationContext* txn) {
+void Balancer::_beginRound(OperationContext* opCtx) {
stdx::unique_lock<stdx::mutex> lock(_mutex);
_inBalancerRound = true;
_condVar.notify_all();
}
-void Balancer::_endRound(OperationContext* txn, Seconds waitTimeout) {
+void Balancer::_endRound(OperationContext* opCtx, Seconds waitTimeout) {
{
stdx::lock_guard<stdx::mutex> lock(_mutex);
_inBalancerRound = false;
@@ -451,16 +452,16 @@ void Balancer::_endRound(OperationContext* txn, Seconds waitTimeout) {
_condVar.notify_all();
}
- _sleepFor(txn, waitTimeout);
+ _sleepFor(opCtx, waitTimeout);
}
-void Balancer::_sleepFor(OperationContext* txn, Seconds waitTimeout) {
+void Balancer::_sleepFor(OperationContext* opCtx, Seconds waitTimeout) {
stdx::unique_lock<stdx::mutex> lock(_mutex);
_condVar.wait_for(lock, waitTimeout.toSystemDuration(), [&] { return _state != kRunning; });
}
-bool Balancer::_checkOIDs(OperationContext* txn) {
- auto shardingContext = Grid::get(txn);
+bool Balancer::_checkOIDs(OperationContext* opCtx) {
+ auto shardingContext = Grid::get(opCtx);
vector<ShardId> all;
shardingContext->shardRegistry()->getAllShardIds(&all);
@@ -473,14 +474,14 @@ bool Balancer::_checkOIDs(OperationContext* txn) {
return false;
}
- auto shardStatus = shardingContext->shardRegistry()->getShard(txn, shardId);
+ auto shardStatus = shardingContext->shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
continue;
}
const auto s = shardStatus.getValue();
auto result = uassertStatusOK(
- s->runCommandWithFixedRetryAttempts(txn,
+ s->runCommandWithFixedRetryAttempts(opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"admin",
BSON("features" << 1),
@@ -497,18 +498,18 @@ bool Balancer::_checkOIDs(OperationContext* txn) {
<< " and " << oids[x];
result = uassertStatusOK(s->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"admin",
BSON("features" << 1 << "oidReset" << 1),
Shard::RetryPolicy::kIdempotent));
uassertStatusOK(result.commandStatus);
- auto otherShardStatus = shardingContext->shardRegistry()->getShard(txn, oids[x]);
+ auto otherShardStatus = shardingContext->shardRegistry()->getShard(opCtx, oids[x]);
if (otherShardStatus.isOK()) {
result = uassertStatusOK(
otherShardStatus.getValue()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"admin",
BSON("features" << 1 << "oidReset" << 1),
@@ -526,14 +527,14 @@ bool Balancer::_checkOIDs(OperationContext* txn) {
return true;
}
-Status Balancer::_enforceTagRanges(OperationContext* txn) {
- auto chunksToSplitStatus = _chunkSelectionPolicy->selectChunksToSplit(txn);
+Status Balancer::_enforceTagRanges(OperationContext* opCtx) {
+ auto chunksToSplitStatus = _chunkSelectionPolicy->selectChunksToSplit(opCtx);
if (!chunksToSplitStatus.isOK()) {
return chunksToSplitStatus.getStatus();
}
for (const auto& splitInfo : chunksToSplitStatus.getValue()) {
- auto scopedCMStatus = ScopedChunkManager::refreshAndGet(txn, splitInfo.nss);
+ auto scopedCMStatus = ScopedChunkManager::refreshAndGet(opCtx, splitInfo.nss);
if (!scopedCMStatus.isOK()) {
return scopedCMStatus.getStatus();
}
@@ -541,7 +542,7 @@ Status Balancer::_enforceTagRanges(OperationContext* txn) {
const auto& scopedCM = scopedCMStatus.getValue();
auto splitStatus =
- shardutil::splitChunkAtMultiplePoints(txn,
+ shardutil::splitChunkAtMultiplePoints(opCtx,
splitInfo.shardId,
splitInfo.nss,
scopedCM.cm()->getShardKeyPattern(),
@@ -557,9 +558,9 @@ Status Balancer::_enforceTagRanges(OperationContext* txn) {
return Status::OK();
}
-int Balancer::_moveChunks(OperationContext* txn,
+int Balancer::_moveChunks(OperationContext* opCtx,
const BalancerChunkSelectionPolicy::MigrateInfoVector& candidateChunks) {
- auto balancerConfig = Grid::get(txn)->getBalancerConfiguration();
+ auto balancerConfig = Grid::get(opCtx)->getBalancerConfiguration();
// If the balancer was disabled since we started this round, don't start new chunk moves
if (_stopRequested() || !balancerConfig->shouldBalance()) {
@@ -568,7 +569,7 @@ int Balancer::_moveChunks(OperationContext* txn,
}
auto migrationStatuses =
- _migrationManager.executeMigrationsForAutoBalance(txn,
+ _migrationManager.executeMigrationsForAutoBalance(opCtx,
candidateChunks,
balancerConfig->getMaxChunkSizeBytes(),
balancerConfig->getSecondaryThrottle(),
@@ -598,7 +599,7 @@ int Balancer::_moveChunks(OperationContext* txn,
log() << "Performing a split because migration " << redact(requestIt->toString())
<< " failed for size reasons" << causedBy(redact(status));
- _splitOrMarkJumbo(txn, NamespaceString(requestIt->ns), requestIt->minKey);
+ _splitOrMarkJumbo(opCtx, NamespaceString(requestIt->ns), requestIt->minKey);
continue;
}
@@ -609,28 +610,28 @@ int Balancer::_moveChunks(OperationContext* txn,
return numChunksProcessed;
}
-void Balancer::_splitOrMarkJumbo(OperationContext* txn,
+void Balancer::_splitOrMarkJumbo(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& minKey) {
- auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(txn, nss));
+ auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(opCtx, nss));
const auto cm = scopedCM.cm().get();
auto chunk = cm->findIntersectingChunkWithSimpleCollation(minKey);
try {
const auto splitPoints = uassertStatusOK(shardutil::selectChunkSplitPoints(
- txn,
+ opCtx,
chunk->getShardId(),
nss,
cm->getShardKeyPattern(),
ChunkRange(chunk->getMin(), chunk->getMax()),
- Grid::get(txn)->getBalancerConfiguration()->getMaxChunkSizeBytes(),
+ Grid::get(opCtx)->getBalancerConfiguration()->getMaxChunkSizeBytes(),
boost::none));
uassert(ErrorCodes::CannotSplit, "No split points found", !splitPoints.empty());
uassertStatusOK(
- shardutil::splitChunkAtMultiplePoints(txn,
+ shardutil::splitChunkAtMultiplePoints(opCtx,
chunk->getShardId(),
nss,
cm->getShardKeyPattern(),
@@ -644,8 +645,8 @@ void Balancer::_splitOrMarkJumbo(OperationContext* txn,
const std::string chunkName = ChunkType::genID(nss.ns(), chunk->getMin());
- auto status = Grid::get(txn)->catalogClient(txn)->updateConfigDocument(
- txn,
+ auto status = Grid::get(opCtx)->catalogClient(opCtx)->updateConfigDocument(
+ opCtx,
ChunkType::ConfigNS,
BSON(ChunkType::name(chunkName)),
BSON("$set" << BSON(ChunkType::jumbo(true))),
diff --git a/src/mongo/db/s/balancer/balancer.h b/src/mongo/db/s/balancer/balancer.h
index 9171daac8cd..1537c476357 100644
--- a/src/mongo/db/s/balancer/balancer.h
+++ b/src/mongo/db/s/balancer/balancer.h
@@ -81,7 +81,7 @@ public:
* waitForBalancerToStop has been called before). Any code in this call must not try to acquire
* any locks or to wait on operations, which acquire locks.
*/
- void initiateBalancer(OperationContext* txn);
+ void initiateBalancer(OperationContext* opCtx);
/**
* Invoked when this node which is currently serving as a 'PRIMARY' steps down and is invoked
@@ -110,7 +110,7 @@ public:
* Potentially blocking method, which will return immediately if the balancer is not running a
* balancer round and will block until the current round completes otherwise.
*/
- void joinCurrentRound(OperationContext* txn);
+ void joinCurrentRound(OperationContext* opCtx);
/**
* Blocking call, which requests the balancer to move a single chunk to a more appropriate
@@ -118,7 +118,7 @@ public:
* will actually move because it may already be at the best shard. An error will be returned if
* the attempt to find a better shard or the actual migration fail for any reason.
*/
- Status rebalanceSingleChunk(OperationContext* txn, const ChunkType& chunk);
+ Status rebalanceSingleChunk(OperationContext* opCtx, const ChunkType& chunk);
/**
* Blocking call, which requests the balancer to move a single chunk to the specified location
@@ -128,7 +128,7 @@ public:
* NOTE: This call disregards the balancer enabled/disabled status and will proceed with the
* move regardless. If should be used only for user-initiated moves.
*/
- Status moveSingleChunk(OperationContext* txn,
+ Status moveSingleChunk(OperationContext* opCtx,
const ChunkType& chunk,
const ShardId& newShardId,
uint64_t maxChunkSizeBytes,
@@ -138,7 +138,7 @@ public:
/**
* Appends the runtime state of the balancer instance to the specified builder.
*/
- void report(OperationContext* txn, BSONObjBuilder* builder);
+ void report(OperationContext* opCtx, BSONObjBuilder* builder);
private:
/**
@@ -163,39 +163,39 @@ private:
/**
* Signals the beginning and end of a balancing round.
*/
- void _beginRound(OperationContext* txn);
- void _endRound(OperationContext* txn, Seconds waitTimeout);
+ void _beginRound(OperationContext* opCtx);
+ void _endRound(OperationContext* opCtx, Seconds waitTimeout);
/**
* Blocks the caller for the specified timeout or until the balancer condition variable is
* signaled, whichever comes first.
*/
- void _sleepFor(OperationContext* txn, Seconds waitTimeout);
+ void _sleepFor(OperationContext* opCtx, Seconds waitTimeout);
/**
* Returns true if all the servers listed in configdb as being shards are reachable and are
* distinct processes (no hostname mixup).
*/
- bool _checkOIDs(OperationContext* txn);
+ bool _checkOIDs(OperationContext* opCtx);
/**
* Iterates through all chunks in all collections and ensures that no chunks straddle tag
* boundary. If any do, they will be split.
*/
- Status _enforceTagRanges(OperationContext* txn);
+ Status _enforceTagRanges(OperationContext* opCtx);
/**
* Schedules migrations for the specified set of chunks and returns how many chunks were
* successfully processed.
*/
- int _moveChunks(OperationContext* txn,
+ int _moveChunks(OperationContext* opCtx,
const BalancerChunkSelectionPolicy::MigrateInfoVector& candidateChunks);
/**
* Performs a split on the chunk with min value "minKey". If the split fails, it is marked as
* jumbo.
*/
- void _splitOrMarkJumbo(OperationContext* txn,
+ void _splitOrMarkJumbo(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& minKey);
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h b/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h
index e2d7f6a024e..990f5821e08 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h
@@ -87,14 +87,14 @@ public:
* they violate the policy for some reason. The reason is decided by the policy and may include
* chunk is too big or chunk straddles a tag range.
*/
- virtual StatusWith<SplitInfoVector> selectChunksToSplit(OperationContext* txn) = 0;
+ virtual StatusWith<SplitInfoVector> selectChunksToSplit(OperationContext* opCtx) = 0;
/**
* Potentially blocking method, which gives out a set of chunks to be moved. The
* aggressiveBalanceHint indicates to the balancing logic that it should lower the threshold for
* difference in number of chunks across shards and thus potentially cause more chunks to move.
*/
- virtual StatusWith<MigrateInfoVector> selectChunksToMove(OperationContext* txn,
+ virtual StatusWith<MigrateInfoVector> selectChunksToMove(OperationContext* opCtx,
bool aggressiveBalanceHint) = 0;
/**
@@ -104,14 +104,14 @@ public:
* Otherwise returns migration information for where the chunk should be moved.
*/
virtual StatusWith<boost::optional<MigrateInfo>> selectSpecificChunkToMove(
- OperationContext* txn, const ChunkType& chunk) = 0;
+ OperationContext* opCtx, const ChunkType& chunk) = 0;
/**
* Asks the chunk selection policy to validate that the specified chunk migration is allowed
* given the current rules. Returns OK if the migration won't violate any rules or any other
* failed status otherwise.
*/
- virtual Status checkMoveAllowed(OperationContext* txn,
+ virtual Status checkMoveAllowed(OperationContext* opCtx,
const ChunkType& chunk,
const ShardId& newShardId) = 0;
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
index 4f3905b61bd..a4574dfc676 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -63,7 +63,7 @@ namespace {
* distrubution and chunk placement information which is needed by the balancer policy.
*/
StatusWith<DistributionStatus> createCollectionDistributionStatus(
- OperationContext* txn, const ShardStatisticsVector& allShards, ChunkManager* chunkMgr) {
+ OperationContext* opCtx, const ShardStatisticsVector& allShards, ChunkManager* chunkMgr) {
ShardToChunksMap shardToChunksMap;
// Makes sure there is an entry in shardToChunksMap for every shard, so empty shards will also
@@ -87,8 +87,8 @@ StatusWith<DistributionStatus> createCollectionDistributionStatus(
}
vector<TagsType> collectionTags;
- Status tagsStatus = Grid::get(txn)->catalogClient(txn)->getTagsForCollection(
- txn, chunkMgr->getns(), &collectionTags);
+ Status tagsStatus = Grid::get(opCtx)->catalogClient(opCtx)->getTagsForCollection(
+ opCtx, chunkMgr->getns(), &collectionTags);
if (!tagsStatus.isOK()) {
return {tagsStatus.code(),
str::stream() << "Unable to load tags for collection " << chunkMgr->getns()
@@ -186,8 +186,8 @@ BalancerChunkSelectionPolicyImpl::BalancerChunkSelectionPolicyImpl(ClusterStatis
BalancerChunkSelectionPolicyImpl::~BalancerChunkSelectionPolicyImpl() = default;
StatusWith<SplitInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToSplit(
- OperationContext* txn) {
- auto shardStatsStatus = _clusterStats->getStats(txn);
+ OperationContext* opCtx) {
+ auto shardStatsStatus = _clusterStats->getStats(opCtx);
if (!shardStatsStatus.isOK()) {
return shardStatsStatus.getStatus();
}
@@ -196,8 +196,8 @@ StatusWith<SplitInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToSpli
vector<CollectionType> collections;
- Status collsStatus =
- Grid::get(txn)->catalogClient(txn)->getCollections(txn, nullptr, &collections, nullptr);
+ Status collsStatus = Grid::get(opCtx)->catalogClient(opCtx)->getCollections(
+ opCtx, nullptr, &collections, nullptr);
if (!collsStatus.isOK()) {
return collsStatus;
}
@@ -215,7 +215,7 @@ StatusWith<SplitInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToSpli
const NamespaceString nss(coll.getNs());
- auto candidatesStatus = _getSplitCandidatesForCollection(txn, nss, shardStats);
+ auto candidatesStatus = _getSplitCandidatesForCollection(opCtx, nss, shardStats);
if (candidatesStatus == ErrorCodes::NamespaceNotFound) {
// Namespace got dropped before we managed to get to it, so just skip it
continue;
@@ -234,8 +234,8 @@ StatusWith<SplitInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToSpli
}
StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToMove(
- OperationContext* txn, bool aggressiveBalanceHint) {
- auto shardStatsStatus = _clusterStats->getStats(txn);
+ OperationContext* opCtx, bool aggressiveBalanceHint) {
+ auto shardStatsStatus = _clusterStats->getStats(opCtx);
if (!shardStatsStatus.isOK()) {
return shardStatsStatus.getStatus();
}
@@ -248,8 +248,8 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToMo
vector<CollectionType> collections;
- Status collsStatus =
- Grid::get(txn)->catalogClient(txn)->getCollections(txn, nullptr, &collections, nullptr);
+ Status collsStatus = Grid::get(opCtx)->catalogClient(opCtx)->getCollections(
+ opCtx, nullptr, &collections, nullptr);
if (!collsStatus.isOK()) {
return collsStatus;
}
@@ -273,7 +273,7 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToMo
}
auto candidatesStatus =
- _getMigrateCandidatesForCollection(txn, nss, shardStats, aggressiveBalanceHint);
+ _getMigrateCandidatesForCollection(opCtx, nss, shardStats, aggressiveBalanceHint);
if (candidatesStatus == ErrorCodes::NamespaceNotFound) {
// Namespace got dropped before we managed to get to it, so just skip it
continue;
@@ -292,9 +292,9 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToMo
}
StatusWith<boost::optional<MigrateInfo>>
-BalancerChunkSelectionPolicyImpl::selectSpecificChunkToMove(OperationContext* txn,
+BalancerChunkSelectionPolicyImpl::selectSpecificChunkToMove(OperationContext* opCtx,
const ChunkType& chunk) {
- auto shardStatsStatus = _clusterStats->getStats(txn);
+ auto shardStatsStatus = _clusterStats->getStats(opCtx);
if (!shardStatsStatus.isOK()) {
return shardStatsStatus.getStatus();
}
@@ -303,7 +303,7 @@ BalancerChunkSelectionPolicyImpl::selectSpecificChunkToMove(OperationContext* tx
const NamespaceString nss(chunk.getNS());
- auto scopedCMStatus = ScopedChunkManager::refreshAndGet(txn, nss);
+ auto scopedCMStatus = ScopedChunkManager::refreshAndGet(opCtx, nss);
if (!scopedCMStatus.isOK()) {
return scopedCMStatus.getStatus();
}
@@ -311,7 +311,7 @@ BalancerChunkSelectionPolicyImpl::selectSpecificChunkToMove(OperationContext* tx
const auto& scopedCM = scopedCMStatus.getValue();
const auto cm = scopedCM.cm().get();
- const auto collInfoStatus = createCollectionDistributionStatus(txn, shardStats, cm);
+ const auto collInfoStatus = createCollectionDistributionStatus(opCtx, shardStats, cm);
if (!collInfoStatus.isOK()) {
return collInfoStatus.getStatus();
}
@@ -321,10 +321,10 @@ BalancerChunkSelectionPolicyImpl::selectSpecificChunkToMove(OperationContext* tx
return BalancerPolicy::balanceSingleChunk(chunk, shardStats, distribution);
}
-Status BalancerChunkSelectionPolicyImpl::checkMoveAllowed(OperationContext* txn,
+Status BalancerChunkSelectionPolicyImpl::checkMoveAllowed(OperationContext* opCtx,
const ChunkType& chunk,
const ShardId& newShardId) {
- auto shardStatsStatus = _clusterStats->getStats(txn);
+ auto shardStatsStatus = _clusterStats->getStats(opCtx);
if (!shardStatsStatus.isOK()) {
return shardStatsStatus.getStatus();
}
@@ -333,7 +333,7 @@ Status BalancerChunkSelectionPolicyImpl::checkMoveAllowed(OperationContext* txn,
const NamespaceString nss(chunk.getNS());
- auto scopedCMStatus = ScopedChunkManager::refreshAndGet(txn, nss);
+ auto scopedCMStatus = ScopedChunkManager::refreshAndGet(opCtx, nss);
if (!scopedCMStatus.isOK()) {
return scopedCMStatus.getStatus();
}
@@ -341,7 +341,7 @@ Status BalancerChunkSelectionPolicyImpl::checkMoveAllowed(OperationContext* txn,
const auto& scopedCM = scopedCMStatus.getValue();
const auto cm = scopedCM.cm().get();
- const auto collInfoStatus = createCollectionDistributionStatus(txn, shardStats, cm);
+ const auto collInfoStatus = createCollectionDistributionStatus(opCtx, shardStats, cm);
if (!collInfoStatus.isOK()) {
return collInfoStatus.getStatus();
}
@@ -365,8 +365,8 @@ Status BalancerChunkSelectionPolicyImpl::checkMoveAllowed(OperationContext* txn,
}
StatusWith<SplitInfoVector> BalancerChunkSelectionPolicyImpl::_getSplitCandidatesForCollection(
- OperationContext* txn, const NamespaceString& nss, const ShardStatisticsVector& shardStats) {
- auto scopedCMStatus = ScopedChunkManager::refreshAndGet(txn, nss);
+ OperationContext* opCtx, const NamespaceString& nss, const ShardStatisticsVector& shardStats) {
+ auto scopedCMStatus = ScopedChunkManager::refreshAndGet(opCtx, nss);
if (!scopedCMStatus.isOK()) {
return scopedCMStatus.getStatus();
}
@@ -376,7 +376,7 @@ StatusWith<SplitInfoVector> BalancerChunkSelectionPolicyImpl::_getSplitCandidate
const auto& shardKeyPattern = cm->getShardKeyPattern().getKeyPattern();
- const auto collInfoStatus = createCollectionDistributionStatus(txn, shardStats, cm);
+ const auto collInfoStatus = createCollectionDistributionStatus(opCtx, shardStats, cm);
if (!collInfoStatus.isOK()) {
return collInfoStatus.getStatus();
}
@@ -416,11 +416,11 @@ StatusWith<SplitInfoVector> BalancerChunkSelectionPolicyImpl::_getSplitCandidate
}
StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::_getMigrateCandidatesForCollection(
- OperationContext* txn,
+ OperationContext* opCtx,
const NamespaceString& nss,
const ShardStatisticsVector& shardStats,
bool aggressiveBalanceHint) {
- auto scopedCMStatus = ScopedChunkManager::refreshAndGet(txn, nss);
+ auto scopedCMStatus = ScopedChunkManager::refreshAndGet(opCtx, nss);
if (!scopedCMStatus.isOK()) {
return scopedCMStatus.getStatus();
}
@@ -430,7 +430,7 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::_getMigrateCandi
const auto& shardKeyPattern = cm->getShardKeyPattern().getKeyPattern();
- const auto collInfoStatus = createCollectionDistributionStatus(txn, shardStats, cm);
+ const auto collInfoStatus = createCollectionDistributionStatus(opCtx, shardStats, cm);
if (!collInfoStatus.isOK()) {
return collInfoStatus.getStatus();
}
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h
index f010d8c723b..6d200911836 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h
@@ -39,15 +39,15 @@ public:
BalancerChunkSelectionPolicyImpl(ClusterStatistics* clusterStats);
~BalancerChunkSelectionPolicyImpl();
- StatusWith<SplitInfoVector> selectChunksToSplit(OperationContext* txn) override;
+ StatusWith<SplitInfoVector> selectChunksToSplit(OperationContext* opCtx) override;
- StatusWith<MigrateInfoVector> selectChunksToMove(OperationContext* txn,
+ StatusWith<MigrateInfoVector> selectChunksToMove(OperationContext* opCtx,
bool aggressiveBalanceHint) override;
StatusWith<boost::optional<MigrateInfo>> selectSpecificChunkToMove(
- OperationContext* txn, const ChunkType& chunk) override;
+ OperationContext* opCtx, const ChunkType& chunk) override;
- Status checkMoveAllowed(OperationContext* txn,
+ Status checkMoveAllowed(OperationContext* opCtx,
const ChunkType& chunk,
const ShardId& newShardId) override;
@@ -57,14 +57,16 @@ private:
* figure out whether some of them validate the tag range boundaries and need to be split.
*/
StatusWith<SplitInfoVector> _getSplitCandidatesForCollection(
- OperationContext* txn, const NamespaceString& nss, const ShardStatisticsVector& shardStats);
+ OperationContext* opCtx,
+ const NamespaceString& nss,
+ const ShardStatisticsVector& shardStats);
/**
* Synchronous method, which iterates the collection's chunks and uses the cluster statistics to
* figure out where to place them.
*/
StatusWith<MigrateInfoVector> _getMigrateCandidatesForCollection(
- OperationContext* txn,
+ OperationContext* opCtx,
const NamespaceString& nss,
const ShardStatisticsVector& shardStats,
bool aggressiveBalanceHint);
diff --git a/src/mongo/db/s/balancer/cluster_statistics.h b/src/mongo/db/s/balancer/cluster_statistics.h
index 2717c42b7ee..59435bb8dde 100644
--- a/src/mongo/db/s/balancer/cluster_statistics.h
+++ b/src/mongo/db/s/balancer/cluster_statistics.h
@@ -106,7 +106,7 @@ public:
* Retrieves a snapshot of the current shard utilization state. The implementation of this
* method may block if necessary in order to refresh its state or may return a cached value.
*/
- virtual StatusWith<std::vector<ShardStatistics>> getStats(OperationContext* txn) = 0;
+ virtual StatusWith<std::vector<ShardStatistics>> getStats(OperationContext* opCtx) = 0;
protected:
ClusterStatistics();
diff --git a/src/mongo/db/s/balancer/cluster_statistics_impl.cpp b/src/mongo/db/s/balancer/cluster_statistics_impl.cpp
index 6ae4d9c223f..0547cea1124 100644
--- a/src/mongo/db/s/balancer/cluster_statistics_impl.cpp
+++ b/src/mongo/db/s/balancer/cluster_statistics_impl.cpp
@@ -60,16 +60,16 @@ const char kVersionField[] = "version";
* ShardNotFound if shard by that id is not available on the registry
* NoSuchKey if the version could not be retrieved
*/
-StatusWith<string> retrieveShardMongoDVersion(OperationContext* txn, ShardId shardId) {
- auto shardRegistry = Grid::get(txn)->shardRegistry();
- auto shardStatus = shardRegistry->getShard(txn, shardId);
+StatusWith<string> retrieveShardMongoDVersion(OperationContext* opCtx, ShardId shardId) {
+ auto shardRegistry = Grid::get(opCtx)->shardRegistry();
+ auto shardStatus = shardRegistry->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
auto shard = shardStatus.getValue();
auto commandResponse =
- shard->runCommandWithFixedRetryAttempts(txn,
+ shard->runCommandWithFixedRetryAttempts(opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"admin",
BSON("serverStatus" << 1),
@@ -100,14 +100,14 @@ ClusterStatisticsImpl::ClusterStatisticsImpl() = default;
ClusterStatisticsImpl::~ClusterStatisticsImpl() = default;
-StatusWith<vector<ShardStatistics>> ClusterStatisticsImpl::getStats(OperationContext* txn) {
+StatusWith<vector<ShardStatistics>> ClusterStatisticsImpl::getStats(OperationContext* opCtx) {
// Get a list of all the shards that are participating in this balance round along with any
// maximum allowed quotas and current utilization. We get the latter by issuing
// db.serverStatus() (mem.mapped) to all shards.
//
// TODO: skip unresponsive shards and mark information as stale.
- auto shardsStatus = Grid::get(txn)->catalogClient(txn)->getAllShards(
- txn, repl::ReadConcernLevel::kMajorityReadConcern);
+ auto shardsStatus = Grid::get(opCtx)->catalogClient(opCtx)->getAllShards(
+ opCtx, repl::ReadConcernLevel::kMajorityReadConcern);
if (!shardsStatus.isOK()) {
return shardsStatus.getStatus();
}
@@ -117,7 +117,7 @@ StatusWith<vector<ShardStatistics>> ClusterStatisticsImpl::getStats(OperationCon
vector<ShardStatistics> stats;
for (const auto& shard : shards) {
- auto shardSizeStatus = shardutil::retrieveTotalShardSize(txn, shard.getName());
+ auto shardSizeStatus = shardutil::retrieveTotalShardSize(opCtx, shard.getName());
if (!shardSizeStatus.isOK()) {
const Status& status = shardSizeStatus.getStatus();
@@ -130,7 +130,7 @@ StatusWith<vector<ShardStatistics>> ClusterStatisticsImpl::getStats(OperationCon
string mongoDVersion;
- auto mongoDVersionStatus = retrieveShardMongoDVersion(txn, shard.getName());
+ auto mongoDVersionStatus = retrieveShardMongoDVersion(opCtx, shard.getName());
if (mongoDVersionStatus.isOK()) {
mongoDVersion = std::move(mongoDVersionStatus.getValue());
} else {
diff --git a/src/mongo/db/s/balancer/cluster_statistics_impl.h b/src/mongo/db/s/balancer/cluster_statistics_impl.h
index d03a2f2b403..6d5524a5b1a 100644
--- a/src/mongo/db/s/balancer/cluster_statistics_impl.h
+++ b/src/mongo/db/s/balancer/cluster_statistics_impl.h
@@ -42,7 +42,7 @@ public:
ClusterStatisticsImpl();
~ClusterStatisticsImpl();
- StatusWith<std::vector<ShardStatistics>> getStats(OperationContext* txn) override;
+ StatusWith<std::vector<ShardStatistics>> getStats(OperationContext* opCtx) override;
};
} // namespace mongo
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index 7882201e8c2..7f267b97e67 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -111,7 +111,7 @@ MigrationManager::~MigrationManager() {
}
MigrationStatuses MigrationManager::executeMigrationsForAutoBalance(
- OperationContext* txn,
+ OperationContext* opCtx,
const vector<MigrateInfo>& migrateInfos,
uint64_t maxChunkSizeBytes,
const MigrationSecondaryThrottleOptions& secondaryThrottle,
@@ -127,7 +127,7 @@ MigrationStatuses MigrationManager::executeMigrationsForAutoBalance(
// Write a document to the config.migrations collection, in case this migration must be
// recovered by the Balancer. Fail if the chunk is already moving.
auto statusWithScopedMigrationRequest =
- ScopedMigrationRequest::writeMigration(txn, migrateInfo, waitForDelete);
+ ScopedMigrationRequest::writeMigration(opCtx, migrateInfo, waitForDelete);
if (!statusWithScopedMigrationRequest.isOK()) {
migrationStatuses.emplace(migrateInfo.getName(),
std::move(statusWithScopedMigrationRequest.getStatus()));
@@ -137,7 +137,7 @@ MigrationStatuses MigrationManager::executeMigrationsForAutoBalance(
std::move(statusWithScopedMigrationRequest.getValue()));
responses.emplace_back(
- _schedule(txn, migrateInfo, maxChunkSizeBytes, secondaryThrottle, waitForDelete),
+ _schedule(opCtx, migrateInfo, maxChunkSizeBytes, secondaryThrottle, waitForDelete),
migrateInfo);
}
@@ -162,7 +162,7 @@ MigrationStatuses MigrationManager::executeMigrationsForAutoBalance(
}
Status MigrationManager::executeManualMigration(
- OperationContext* txn,
+ OperationContext* opCtx,
const MigrateInfo& migrateInfo,
uint64_t maxChunkSizeBytes,
const MigrationSecondaryThrottleOptions& secondaryThrottle,
@@ -172,15 +172,15 @@ Status MigrationManager::executeManualMigration(
// Write a document to the config.migrations collection, in case this migration must be
// recovered by the Balancer. Fail if the chunk is already moving.
auto statusWithScopedMigrationRequest =
- ScopedMigrationRequest::writeMigration(txn, migrateInfo, waitForDelete);
+ ScopedMigrationRequest::writeMigration(opCtx, migrateInfo, waitForDelete);
if (!statusWithScopedMigrationRequest.isOK()) {
return statusWithScopedMigrationRequest.getStatus();
}
RemoteCommandResponse remoteCommandResponse =
- _schedule(txn, migrateInfo, maxChunkSizeBytes, secondaryThrottle, waitForDelete)->get();
+ _schedule(opCtx, migrateInfo, maxChunkSizeBytes, secondaryThrottle, waitForDelete)->get();
- auto scopedCMStatus = ScopedChunkManager::refreshAndGet(txn, NamespaceString(migrateInfo.ns));
+ auto scopedCMStatus = ScopedChunkManager::refreshAndGet(opCtx, NamespaceString(migrateInfo.ns));
if (!scopedCMStatus.isOK()) {
return scopedCMStatus.getStatus();
}
@@ -204,7 +204,7 @@ Status MigrationManager::executeManualMigration(
return commandStatus;
}
-void MigrationManager::startRecoveryAndAcquireDistLocks(OperationContext* txn) {
+void MigrationManager::startRecoveryAndAcquireDistLocks(OperationContext* opCtx) {
{
stdx::lock_guard<stdx::mutex> lock(_mutex);
invariant(_state == State::kStopped);
@@ -214,15 +214,15 @@ void MigrationManager::startRecoveryAndAcquireDistLocks(OperationContext* txn) {
auto scopedGuard = MakeGuard([&] {
_migrationRecoveryMap.clear();
- _abandonActiveMigrationsAndEnableManager(txn);
+ _abandonActiveMigrationsAndEnableManager(opCtx);
});
- auto distLockManager = Grid::get(txn)->catalogClient(txn)->getDistLockManager();
+ auto distLockManager = Grid::get(opCtx)->catalogClient(opCtx)->getDistLockManager();
// Load the active migrations from the config.migrations collection.
auto statusWithMigrationsQueryResponse =
- Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
- txn,
+ Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
NamespaceString(MigrationType::ConfigNS),
@@ -260,7 +260,7 @@ void MigrationManager::startRecoveryAndAcquireDistLocks(OperationContext* txn) {
<< migrateType.getNss().ns());
auto statusWithDistLockHandle = distLockManager->tryLockWithLocalWriteConcern(
- txn, migrateType.getNss().ns(), whyMessage, _lockSessionID);
+ opCtx, migrateType.getNss().ns(), whyMessage, _lockSessionID);
if (!statusWithDistLockHandle.isOK()) {
log() << "Failed to acquire distributed lock for collection '"
<< migrateType.getNss().ns()
@@ -277,7 +277,7 @@ void MigrationManager::startRecoveryAndAcquireDistLocks(OperationContext* txn) {
scopedGuard.Dismiss();
}
-void MigrationManager::finishRecovery(OperationContext* txn,
+void MigrationManager::finishRecovery(OperationContext* opCtx,
uint64_t maxChunkSizeBytes,
const MigrationSecondaryThrottleOptions& secondaryThrottle) {
{
@@ -298,7 +298,7 @@ void MigrationManager::finishRecovery(OperationContext* txn,
auto scopedGuard = MakeGuard([&] {
_migrationRecoveryMap.clear();
- _abandonActiveMigrationsAndEnableManager(txn);
+ _abandonActiveMigrationsAndEnableManager(opCtx);
});
// Schedule recovered migrations.
@@ -310,7 +310,7 @@ void MigrationManager::finishRecovery(OperationContext* txn,
auto& migrateInfos = nssAndMigrateInfos.second;
invariant(!migrateInfos.empty());
- auto scopedCMStatus = ScopedChunkManager::refreshAndGet(txn, nss);
+ auto scopedCMStatus = ScopedChunkManager::refreshAndGet(opCtx, nss);
if (!scopedCMStatus.isOK()) {
// This shouldn't happen because the collection was intact and sharded when the previous
// config primary was active and the dist locks have been held by the balancer
@@ -338,23 +338,23 @@ void MigrationManager::finishRecovery(OperationContext* txn,
if (chunk->getShardId() != migrationInfo.from) {
// Chunk is no longer on the source shard specified by this migration. Erase the
// migration recovery document associated with it.
- ScopedMigrationRequest::createForRecovery(txn, nss, migrationInfo.minKey);
+ ScopedMigrationRequest::createForRecovery(opCtx, nss, migrationInfo.minKey);
continue;
}
scopedMigrationRequests.emplace_back(
- ScopedMigrationRequest::createForRecovery(txn, nss, migrationInfo.minKey));
+ ScopedMigrationRequest::createForRecovery(opCtx, nss, migrationInfo.minKey));
scheduledMigrations++;
- responses.emplace_back(
- _schedule(txn, migrationInfo, maxChunkSizeBytes, secondaryThrottle, waitForDelete));
+ responses.emplace_back(_schedule(
+ opCtx, migrationInfo, maxChunkSizeBytes, secondaryThrottle, waitForDelete));
}
// If no migrations were scheduled for this namespace, free the dist lock
if (!scheduledMigrations) {
- Grid::get(txn)->catalogClient(txn)->getDistLockManager()->unlock(
- txn, _lockSessionID, nss.ns());
+ Grid::get(opCtx)->catalogClient(opCtx)->getDistLockManager()->unlock(
+ opCtx, _lockSessionID, nss.ns());
}
}
@@ -408,7 +408,7 @@ void MigrationManager::drainActiveMigrations() {
}
shared_ptr<Notification<RemoteCommandResponse>> MigrationManager::_schedule(
- OperationContext* txn,
+ OperationContext* opCtx,
const MigrateInfo& migrateInfo,
uint64_t maxChunkSizeBytes,
const MigrationSecondaryThrottleOptions& secondaryThrottle,
@@ -425,15 +425,16 @@ shared_ptr<Notification<RemoteCommandResponse>> MigrationManager::_schedule(
}
}
- const auto fromShardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, migrateInfo.from);
+ const auto fromShardStatus =
+ Grid::get(opCtx)->shardRegistry()->getShard(opCtx, migrateInfo.from);
if (!fromShardStatus.isOK()) {
return std::make_shared<Notification<RemoteCommandResponse>>(
std::move(fromShardStatus.getStatus()));
}
const auto fromShard = fromShardStatus.getValue();
- auto fromHostStatus =
- fromShard->getTargeter()->findHost(txn, ReadPreferenceSetting{ReadPreference::PrimaryOnly});
+ auto fromHostStatus = fromShard->getTargeter()->findHost(
+ opCtx, ReadPreferenceSetting{ReadPreference::PrimaryOnly});
if (!fromHostStatus.isOK()) {
return std::make_shared<Notification<RemoteCommandResponse>>(
std::move(fromHostStatus.getStatus()));
@@ -444,7 +445,7 @@ shared_ptr<Notification<RemoteCommandResponse>> MigrationManager::_schedule(
&builder,
nss,
migrateInfo.version,
- repl::ReplicationCoordinator::get(txn)->getConfig().getConnectionString(),
+ repl::ReplicationCoordinator::get(opCtx)->getConfig().getConnectionString(),
migrateInfo.from,
migrateInfo.to,
ChunkRange(migrateInfo.minKey, migrateInfo.maxKey),
@@ -464,15 +465,16 @@ shared_ptr<Notification<RemoteCommandResponse>> MigrationManager::_schedule(
auto retVal = migration.completionNotification;
- _schedule_inlock(txn, fromHostStatus.getValue(), std::move(migration));
+ _schedule_inlock(opCtx, fromHostStatus.getValue(), std::move(migration));
return retVal;
}
-void MigrationManager::_schedule_inlock(OperationContext* txn,
+void MigrationManager::_schedule_inlock(OperationContext* opCtx,
const HostAndPort& targetHost,
Migration migration) {
- executor::TaskExecutor* const executor = Grid::get(txn)->getExecutorPool()->getFixedExecutor();
+ executor::TaskExecutor* const executor =
+ Grid::get(opCtx)->getExecutorPool()->getFixedExecutor();
const NamespaceString nss(migration.nss);
@@ -482,8 +484,8 @@ void MigrationManager::_schedule_inlock(OperationContext* txn,
// Acquire the collection distributed lock (blocking call)
auto statusWithDistLockHandle =
- Grid::get(txn)->catalogClient(txn)->getDistLockManager()->lockWithSessionID(
- txn,
+ Grid::get(opCtx)->catalogClient(opCtx)->getDistLockManager()->lockWithSessionID(
+ opCtx,
nss.ns(),
whyMessage,
_lockSessionID,
@@ -508,7 +510,7 @@ void MigrationManager::_schedule_inlock(OperationContext* txn,
auto itMigration = migrations->begin();
const RemoteCommandRequest remoteRequest(
- targetHost, NamespaceString::kAdminDb.toString(), itMigration->moveChunkCmdObj, txn);
+ targetHost, NamespaceString::kAdminDb.toString(), itMigration->moveChunkCmdObj, opCtx);
StatusWith<executor::TaskExecutor::CallbackHandle> callbackHandleWithStatus =
executor->scheduleRemoteCommand(
@@ -516,10 +518,10 @@ void MigrationManager::_schedule_inlock(OperationContext* txn,
[this, itMigration](const executor::TaskExecutor::RemoteCommandCallbackArgs& args) {
Client::initThread(getThreadName().c_str());
ON_BLOCK_EXIT([&] { Client::destroy(); });
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
stdx::lock_guard<stdx::mutex> lock(_mutex);
- _complete_inlock(txn.get(), itMigration, args.response);
+ _complete_inlock(opCtx.get(), itMigration, args.response);
});
if (callbackHandleWithStatus.isOK()) {
@@ -527,10 +529,10 @@ void MigrationManager::_schedule_inlock(OperationContext* txn,
return;
}
- _complete_inlock(txn, itMigration, std::move(callbackHandleWithStatus.getStatus()));
+ _complete_inlock(opCtx, itMigration, std::move(callbackHandleWithStatus.getStatus()));
}
-void MigrationManager::_complete_inlock(OperationContext* txn,
+void MigrationManager::_complete_inlock(OperationContext* opCtx,
MigrationsList::iterator itMigration,
const RemoteCommandResponse& remoteCommandResponse) {
const NamespaceString nss(itMigration->nss);
@@ -547,8 +549,8 @@ void MigrationManager::_complete_inlock(OperationContext* txn,
migrations->erase(itMigration);
if (migrations->empty()) {
- Grid::get(txn)->catalogClient(txn)->getDistLockManager()->unlock(
- txn, _lockSessionID, nss.ns());
+ Grid::get(opCtx)->catalogClient(opCtx)->getDistLockManager()->unlock(
+ opCtx, _lockSessionID, nss.ns());
_activeMigrations.erase(it);
_checkDrained_inlock();
}
@@ -572,7 +574,7 @@ void MigrationManager::_waitForRecovery() {
_condVar.wait(lock, [this] { return _state != State::kRecovering; });
}
-void MigrationManager::_abandonActiveMigrationsAndEnableManager(OperationContext* txn) {
+void MigrationManager::_abandonActiveMigrationsAndEnableManager(OperationContext* opCtx) {
stdx::unique_lock<stdx::mutex> lock(_mutex);
if (_state == State::kStopping) {
// The balancer was interrupted. Let the next balancer recover the state.
@@ -580,16 +582,16 @@ void MigrationManager::_abandonActiveMigrationsAndEnableManager(OperationContext
}
invariant(_state == State::kRecovering);
- auto catalogClient = Grid::get(txn)->catalogClient(txn);
+ auto catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
// Unlock all balancer distlocks we aren't using anymore.
auto distLockManager = catalogClient->getDistLockManager();
- distLockManager->unlockAll(txn, distLockManager->getProcessID());
+ distLockManager->unlockAll(opCtx, distLockManager->getProcessID());
// Clear the config.migrations collection so that those chunks can be scheduled for migration
// again.
catalogClient->removeConfigDocuments(
- txn, MigrationType::ConfigNS, BSONObj(), kMajorityWriteConcern);
+ opCtx, MigrationType::ConfigNS, BSONObj(), kMajorityWriteConcern);
_state = State::kEnabled;
_condVar.notify_all();
diff --git a/src/mongo/db/s/balancer/migration_manager.h b/src/mongo/db/s/balancer/migration_manager.h
index 011397c412a..26da3f057f7 100644
--- a/src/mongo/db/s/balancer/migration_manager.h
+++ b/src/mongo/db/s/balancer/migration_manager.h
@@ -79,7 +79,7 @@ public:
* Returns a map of migration Status objects to indicate the success/failure of each migration.
*/
MigrationStatuses executeMigrationsForAutoBalance(
- OperationContext* txn,
+ OperationContext* opCtx,
const std::vector<MigrateInfo>& migrateInfos,
uint64_t maxChunkSizeBytes,
const MigrationSecondaryThrottleOptions& secondaryThrottle,
@@ -92,7 +92,7 @@ public:
*
* Returns the status of the migration.
*/
- Status executeManualMigration(OperationContext* txn,
+ Status executeManualMigration(OperationContext* opCtx,
const MigrateInfo& migrateInfo,
uint64_t maxChunkSizeBytes,
const MigrationSecondaryThrottleOptions& secondaryThrottle,
@@ -106,7 +106,7 @@ public:
*
* The active migration recovery may fail and be abandoned, setting the state to kEnabled.
*/
- void startRecoveryAndAcquireDistLocks(OperationContext* txn);
+ void startRecoveryAndAcquireDistLocks(OperationContext* opCtx);
/**
* Blocking method that must only be called after startRecovery has been called. Recovers the
@@ -118,7 +118,7 @@ public:
* The active migration recovery may fail and be abandoned, setting the state to kEnabled and
* unblocking any process waiting on the recovery state.
*/
- void finishRecovery(OperationContext* txn,
+ void finishRecovery(OperationContext* opCtx,
uint64_t maxChunkSizeBytes,
const MigrationSecondaryThrottleOptions& secondaryThrottle);
@@ -181,7 +181,7 @@ private:
* can be used to obtain the outcome of the operation.
*/
std::shared_ptr<Notification<executor::RemoteCommandResponse>> _schedule(
- OperationContext* txn,
+ OperationContext* opCtx,
const MigrateInfo& migrateInfo,
uint64_t maxChunkSizeBytes,
const MigrationSecondaryThrottleOptions& secondaryThrottle,
@@ -194,7 +194,7 @@ private:
* The distributed lock is acquired before scheduling the first migration for the collection and
* is only released when all active migrations on the collection have finished.
*/
- void _schedule_inlock(OperationContext* txn,
+ void _schedule_inlock(OperationContext* opCtx,
const HostAndPort& targetHost,
Migration migration);
@@ -204,7 +204,7 @@ private:
* passed iterator and if this is the last migration for the collection will free the collection
* distributed lock.
*/
- void _complete_inlock(OperationContext* txn,
+ void _complete_inlock(OperationContext* opCtx,
MigrationsList::iterator itMigration,
const executor::RemoteCommandResponse& remoteCommandResponse);
@@ -226,7 +226,7 @@ private:
* that the balancer holds, clears the config.migrations collection, changes the state of the
* migration manager to kEnabled. Then unblocks all processes waiting for kEnabled state.
*/
- void _abandonActiveMigrationsAndEnableManager(OperationContext* txn);
+ void _abandonActiveMigrationsAndEnableManager(OperationContext* opCtx);
/**
* Parses a moveChunk RemoteCommandResponse's two levels of Status objects and distiguishes
diff --git a/src/mongo/db/s/balancer/migration_manager_test.cpp b/src/mongo/db/s/balancer/migration_manager_test.cpp
index b763cafd25b..dd2ee3d84d6 100644
--- a/src/mongo/db/s/balancer/migration_manager_test.cpp
+++ b/src/mongo/db/s/balancer/migration_manager_test.cpp
@@ -78,7 +78,7 @@ protected:
/**
* Returns the mock targeter for the specified shard. Useful to use like so
*
- * shardTargeterMock(txn, shardId)->setFindHostReturnValue(shardHost);
+ * shardTargeterMock(opCtx, shardId)->setFindHostReturnValue(shardHost);
*
* Then calls to RemoteCommandTargeterMock::findHost will return HostAndPort "shardHost" for
* Shard "shardId".
@@ -86,7 +86,7 @@ protected:
* Scheduling a command requires a shard host target. The command will be caught by the mock
* network, but sending the command requires finding the shard's host.
*/
- std::shared_ptr<RemoteCommandTargeterMock> shardTargeterMock(OperationContext* txn,
+ std::shared_ptr<RemoteCommandTargeterMock> shardTargeterMock(OperationContext* opCtx,
ShardId shardId);
/**
@@ -174,9 +174,9 @@ void MigrationManagerTest::tearDown() {
}
std::shared_ptr<RemoteCommandTargeterMock> MigrationManagerTest::shardTargeterMock(
- OperationContext* txn, ShardId shardId) {
+ OperationContext* opCtx, ShardId shardId) {
return RemoteCommandTargeterMock::get(
- uassertStatusOK(shardRegistry()->getShard(txn, shardId))->getTargeter());
+ uassertStatusOK(shardRegistry()->getShard(opCtx, shardId))->getTargeter());
}
void MigrationManagerTest::setUpDatabase(const std::string& dbName, const ShardId primaryShard) {
@@ -315,15 +315,15 @@ TEST_F(MigrationManagerTest, OneCollectionTwoMigrations) {
auto future = launchAsync([this, migrationRequests] {
ON_BLOCK_EXIT([&] { Client::destroy(); });
Client::initThreadIfNotAlready("Test");
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
// Scheduling the moveChunk commands requires finding a host to which to send the command.
// Set up dummy hosts for the source shards.
- shardTargeterMock(txn.get(), kShardId0)->setFindHostReturnValue(kShardHost0);
- shardTargeterMock(txn.get(), kShardId2)->setFindHostReturnValue(kShardHost2);
+ shardTargeterMock(opCtx.get(), kShardId0)->setFindHostReturnValue(kShardHost0);
+ shardTargeterMock(opCtx.get(), kShardId2)->setFindHostReturnValue(kShardHost2);
MigrationStatuses migrationStatuses = _migrationManager->executeMigrationsForAutoBalance(
- txn.get(), migrationRequests, 0, kDefaultSecondaryThrottle, false);
+ opCtx.get(), migrationRequests, 0, kDefaultSecondaryThrottle, false);
for (const auto& migrateInfo : migrationRequests) {
ASSERT_OK(migrationStatuses.at(migrateInfo.getName()));
@@ -378,15 +378,15 @@ TEST_F(MigrationManagerTest, TwoCollectionsTwoMigrationsEach) {
auto future = launchAsync([this, migrationRequests] {
ON_BLOCK_EXIT([&] { Client::destroy(); });
Client::initThreadIfNotAlready("Test");
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
// Scheduling the moveChunk commands requires finding a host to which to send the command.
// Set up dummy hosts for the source shards.
- shardTargeterMock(txn.get(), kShardId0)->setFindHostReturnValue(kShardHost0);
- shardTargeterMock(txn.get(), kShardId2)->setFindHostReturnValue(kShardHost2);
+ shardTargeterMock(opCtx.get(), kShardId0)->setFindHostReturnValue(kShardHost0);
+ shardTargeterMock(opCtx.get(), kShardId2)->setFindHostReturnValue(kShardHost2);
MigrationStatuses migrationStatuses = _migrationManager->executeMigrationsForAutoBalance(
- txn.get(), migrationRequests, 0, kDefaultSecondaryThrottle, false);
+ opCtx.get(), migrationRequests, 0, kDefaultSecondaryThrottle, false);
for (const auto& migrateInfo : migrationRequests) {
ASSERT_OK(migrationStatuses.at(migrateInfo.getName()));
@@ -433,17 +433,17 @@ TEST_F(MigrationManagerTest, SourceShardNotFound) {
auto future = launchAsync([this, chunk1, chunk2, migrationRequests] {
ON_BLOCK_EXIT([&] { Client::destroy(); });
Client::initThreadIfNotAlready("Test");
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
// Scheduling a moveChunk command requires finding a host to which to send the command. Set
// up a dummy host for kShardHost0, and return an error for kShardHost3.
- shardTargeterMock(txn.get(), kShardId0)->setFindHostReturnValue(kShardHost0);
- shardTargeterMock(txn.get(), kShardId2)
+ shardTargeterMock(opCtx.get(), kShardId0)->setFindHostReturnValue(kShardHost0);
+ shardTargeterMock(opCtx.get(), kShardId2)
->setFindHostReturnValue(
Status(ErrorCodes::ReplicaSetNotFound, "SourceShardNotFound generated error."));
MigrationStatuses migrationStatuses = _migrationManager->executeMigrationsForAutoBalance(
- txn.get(), migrationRequests, 0, kDefaultSecondaryThrottle, false);
+ opCtx.get(), migrationRequests, 0, kDefaultSecondaryThrottle, false);
ASSERT_OK(migrationStatuses.at(chunk1.getName()));
ASSERT_EQ(ErrorCodes::ReplicaSetNotFound, migrationStatuses.at(chunk2.getName()));
@@ -480,14 +480,14 @@ TEST_F(MigrationManagerTest, JumboChunkResponseBackwardsCompatibility) {
auto future = launchAsync([this, chunk1, migrationRequests] {
ON_BLOCK_EXIT([&] { Client::destroy(); });
Client::initThreadIfNotAlready("Test");
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
// Scheduling a moveChunk command requires finding a host to which to send the command. Set
// up a dummy host for kShardHost0.
- shardTargeterMock(txn.get(), kShardId0)->setFindHostReturnValue(kShardHost0);
+ shardTargeterMock(opCtx.get(), kShardId0)->setFindHostReturnValue(kShardHost0);
MigrationStatuses migrationStatuses = _migrationManager->executeMigrationsForAutoBalance(
- txn.get(), migrationRequests, 0, kDefaultSecondaryThrottle, false);
+ opCtx.get(), migrationRequests, 0, kDefaultSecondaryThrottle, false);
ASSERT_EQ(ErrorCodes::ChunkTooBig, migrationStatuses.at(chunk1.getName()));
});
@@ -519,15 +519,15 @@ TEST_F(MigrationManagerTest, InterruptMigration) {
auto future = launchAsync([&] {
ON_BLOCK_EXIT([&] { Client::destroy(); });
Client::initThreadIfNotAlready("Test");
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
// Scheduling a moveChunk command requires finding a host to which to send the command. Set
// up a dummy host for kShardHost0.
- shardTargeterMock(txn.get(), kShardId0)->setFindHostReturnValue(kShardHost0);
+ shardTargeterMock(opCtx.get(), kShardId0)->setFindHostReturnValue(kShardHost0);
ASSERT_EQ(ErrorCodes::BalancerInterrupted,
_migrationManager->executeManualMigration(
- txn.get(), {kShardId1, chunk}, 0, kDefaultSecondaryThrottle, false));
+ opCtx.get(), {kShardId1, chunk}, 0, kDefaultSecondaryThrottle, false));
});
// Wait till the move chunk request gets sent and pretend that it is stuck by never responding
@@ -608,14 +608,14 @@ TEST_F(MigrationManagerTest, RestartMigrationManager) {
auto future = launchAsync([&] {
ON_BLOCK_EXIT([&] { Client::destroy(); });
Client::initThreadIfNotAlready("Test");
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
// Scheduling a moveChunk command requires finding a host to which to send the command. Set
// up a dummy host for kShardHost0.
- shardTargeterMock(txn.get(), kShardId0)->setFindHostReturnValue(kShardHost0);
+ shardTargeterMock(opCtx.get(), kShardId0)->setFindHostReturnValue(kShardHost0);
ASSERT_OK(_migrationManager->executeManualMigration(
- txn.get(), {kShardId1, chunk1}, 0, kDefaultSecondaryThrottle, false));
+ opCtx.get(), {kShardId1, chunk1}, 0, kDefaultSecondaryThrottle, false));
});
// Expect only one moveChunk command to be called.
@@ -663,14 +663,14 @@ TEST_F(MigrationManagerTest, MigrationRecovery) {
auto future = launchAsync([this] {
ON_BLOCK_EXIT([&] { Client::destroy(); });
Client::initThreadIfNotAlready("Test");
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
// Scheduling the moveChunk commands requires finding hosts to which to send the commands.
// Set up dummy hosts for the source shards.
- shardTargeterMock(txn.get(), kShardId0)->setFindHostReturnValue(kShardHost0);
- shardTargeterMock(txn.get(), kShardId2)->setFindHostReturnValue(kShardHost2);
+ shardTargeterMock(opCtx.get(), kShardId0)->setFindHostReturnValue(kShardHost0);
+ shardTargeterMock(opCtx.get(), kShardId2)->setFindHostReturnValue(kShardHost2);
- _migrationManager->finishRecovery(txn.get(), 0, kDefaultSecondaryThrottle);
+ _migrationManager->finishRecovery(opCtx.get(), 0, kDefaultSecondaryThrottle);
});
// Expect two moveChunk commands.
@@ -765,15 +765,15 @@ TEST_F(MigrationManagerTest, RemoteCallErrorConversionToOperationFailed) {
auto future = launchAsync([&] {
ON_BLOCK_EXIT([&] { Client::destroy(); });
Client::initThreadIfNotAlready("Test");
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
// Scheduling the moveChunk commands requires finding a host to which to send the command.
// Set up dummy hosts for the source shards.
- shardTargeterMock(txn.get(), kShardId0)->setFindHostReturnValue(kShardHost0);
- shardTargeterMock(txn.get(), kShardId2)->setFindHostReturnValue(kShardHost2);
+ shardTargeterMock(opCtx.get(), kShardId0)->setFindHostReturnValue(kShardHost0);
+ shardTargeterMock(opCtx.get(), kShardId2)->setFindHostReturnValue(kShardHost2);
MigrationStatuses migrationStatuses = _migrationManager->executeMigrationsForAutoBalance(
- txn.get(),
+ opCtx.get(),
{{kShardId1, chunk1}, {kShardId3, chunk2}},
0,
kDefaultSecondaryThrottle,
diff --git a/src/mongo/db/s/balancer/scoped_migration_request.cpp b/src/mongo/db/s/balancer/scoped_migration_request.cpp
index af737fd0640..bbbcb0174f0 100644
--- a/src/mongo/db/s/balancer/scoped_migration_request.cpp
+++ b/src/mongo/db/s/balancer/scoped_migration_request.cpp
@@ -49,14 +49,14 @@ const WriteConcernOptions kMajorityWriteConcern(WriteConcernOptions::kMajority,
const int kDuplicateKeyErrorMaxRetries = 2;
}
-ScopedMigrationRequest::ScopedMigrationRequest(OperationContext* txn,
+ScopedMigrationRequest::ScopedMigrationRequest(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& minKey)
- : _txn(txn), _nss(nss), _minKey(minKey) {}
+ : _opCtx(opCtx), _nss(nss), _minKey(minKey) {}
ScopedMigrationRequest::~ScopedMigrationRequest() {
- if (!_txn) {
- // If the txn object was cleared, nothing should happen in the destructor.
+ if (!_opCtx) {
+ // If the opCtx object was cleared, nothing should happen in the destructor.
return;
}
@@ -64,8 +64,8 @@ ScopedMigrationRequest::~ScopedMigrationRequest() {
// okay.
BSONObj migrationDocumentIdentifier =
BSON(MigrationType::ns(_nss.ns()) << MigrationType::min(_minKey));
- Status result = grid.catalogClient(_txn)->removeConfigDocuments(
- _txn, MigrationType::ConfigNS, migrationDocumentIdentifier, kMajorityWriteConcern);
+ Status result = grid.catalogClient(_opCtx)->removeConfigDocuments(
+ _opCtx, MigrationType::ConfigNS, migrationDocumentIdentifier, kMajorityWriteConcern);
if (!result.isOK()) {
LOG(0) << "Failed to remove config.migrations document for migration '"
@@ -75,31 +75,31 @@ ScopedMigrationRequest::~ScopedMigrationRequest() {
ScopedMigrationRequest::ScopedMigrationRequest(ScopedMigrationRequest&& other) {
*this = std::move(other);
- // Set txn to null so that the destructor will do nothing.
- other._txn = nullptr;
+ // Set opCtx to null so that the destructor will do nothing.
+ other._opCtx = nullptr;
}
ScopedMigrationRequest& ScopedMigrationRequest::operator=(ScopedMigrationRequest&& other) {
if (this != &other) {
- _txn = other._txn;
+ _opCtx = other._opCtx;
_nss = other._nss;
_minKey = other._minKey;
- // Set txn to null so that the destructor will do nothing.
- other._txn = nullptr;
+ // Set opCtx to null so that the destructor will do nothing.
+ other._opCtx = nullptr;
}
return *this;
}
StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
- OperationContext* txn, const MigrateInfo& migrateInfo, bool waitForDelete) {
+ OperationContext* opCtx, const MigrateInfo& migrateInfo, bool waitForDelete) {
// Try to write a unique migration document to config.migrations.
const MigrationType migrationType(migrateInfo, waitForDelete);
for (int retry = 0; retry < kDuplicateKeyErrorMaxRetries; ++retry) {
- Status result = grid.catalogClient(txn)->insertConfigDocument(
- txn, MigrationType::ConfigNS, migrationType.toBSON(), kMajorityWriteConcern);
+ Status result = grid.catalogClient(opCtx)->insertConfigDocument(
+ opCtx, MigrationType::ConfigNS, migrationType.toBSON(), kMajorityWriteConcern);
if (result == ErrorCodes::DuplicateKey) {
// If the exact migration described by "migrateInfo" is active, return a scoped object
@@ -107,7 +107,7 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
// scheduled.
auto statusWithMigrationQueryResult =
grid.shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
NamespaceString(MigrationType::ConfigNS),
@@ -160,7 +160,7 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
// safe (won't delete another migration's document) and necessary to try to clean up the
// document via the destructor.
ScopedMigrationRequest scopedMigrationRequest(
- txn, NamespaceString(migrateInfo.ns), migrateInfo.minKey);
+ opCtx, NamespaceString(migrateInfo.ns), migrateInfo.minKey);
// If there was a write error, let the object go out of scope and clean up in the
// destructor.
@@ -180,28 +180,28 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
<< "' was being moved (somewhere) by another operation.");
}
-ScopedMigrationRequest ScopedMigrationRequest::createForRecovery(OperationContext* txn,
+ScopedMigrationRequest ScopedMigrationRequest::createForRecovery(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& minKey) {
- return ScopedMigrationRequest(txn, nss, minKey);
+ return ScopedMigrationRequest(opCtx, nss, minKey);
}
Status ScopedMigrationRequest::tryToRemoveMigration() {
- invariant(_txn);
+ invariant(_opCtx);
BSONObj migrationDocumentIdentifier =
BSON(MigrationType::ns(_nss.ns()) << MigrationType::min(_minKey));
- Status status = grid.catalogClient(_txn)->removeConfigDocuments(
- _txn, MigrationType::ConfigNS, migrationDocumentIdentifier, kMajorityWriteConcern);
+ Status status = grid.catalogClient(_opCtx)->removeConfigDocuments(
+ _opCtx, MigrationType::ConfigNS, migrationDocumentIdentifier, kMajorityWriteConcern);
if (status.isOK()) {
// Don't try to do a no-op remove in the destructor.
- _txn = nullptr;
+ _opCtx = nullptr;
}
return status;
}
void ScopedMigrationRequest::keepDocumentOnDestruct() {
- invariant(_txn);
- _txn = nullptr;
+ invariant(_opCtx);
+ _opCtx = nullptr;
LOG(1) << "Keeping config.migrations document with namespace '" << _nss << "' and minKey '"
<< _minKey << "' for balancer recovery";
}
diff --git a/src/mongo/db/s/balancer/scoped_migration_request.h b/src/mongo/db/s/balancer/scoped_migration_request.h
index e3b4e3301da..b3f100d92d6 100644
--- a/src/mongo/db/s/balancer/scoped_migration_request.h
+++ b/src/mongo/db/s/balancer/scoped_migration_request.h
@@ -66,7 +66,7 @@ public:
*
* The destructor will handle removing the document when it is no longer needed.
*/
- static StatusWith<ScopedMigrationRequest> writeMigration(OperationContext* txn,
+ static StatusWith<ScopedMigrationRequest> writeMigration(OperationContext* opCtx,
const MigrateInfo& migrate,
bool waitForDelete);
@@ -77,7 +77,7 @@ public:
* This should only be used on Balancer recovery when a config.migrations document already
* exists for the migration.
*/
- static ScopedMigrationRequest createForRecovery(OperationContext* txn,
+ static ScopedMigrationRequest createForRecovery(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& minKey);
@@ -102,12 +102,12 @@ public:
void keepDocumentOnDestruct();
private:
- ScopedMigrationRequest(OperationContext* txn,
+ ScopedMigrationRequest(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& minKey);
// Need an operation context with which to do a write in the destructor.
- OperationContext* _txn;
+ OperationContext* _opCtx;
// ns and minkey are needed to identify the migration document when it is removed from
// config.migrations by the destructor.
diff --git a/src/mongo/db/s/check_sharding_index_command.cpp b/src/mongo/db/s/check_sharding_index_command.cpp
index 0d7ef33de31..79f4b50cbc9 100644
--- a/src/mongo/db/s/check_sharding_index_command.cpp
+++ b/src/mongo/db/s/check_sharding_index_command.cpp
@@ -82,7 +82,7 @@ public:
return parseNsFullyQualified(dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& jsobj,
int options,
@@ -108,7 +108,7 @@ public:
return false;
}
- AutoGetCollection autoColl(txn, nss, MODE_IS);
+ AutoGetCollection autoColl(opCtx, nss, MODE_IS);
Collection* const collection = autoColl.getCollection();
if (!collection) {
@@ -117,7 +117,7 @@ public:
}
IndexDescriptor* idx =
- collection->getIndexCatalog()->findShardKeyPrefixedIndex(txn,
+ collection->getIndexCatalog()->findShardKeyPrefixedIndex(opCtx,
keyPattern,
true); // requireSingleKey
if (idx == NULL) {
@@ -136,7 +136,7 @@ public:
}
unique_ptr<PlanExecutor> exec(
- InternalPlanner::indexScan(txn,
+ InternalPlanner::indexScan(opCtx,
collection,
idx,
min,
@@ -150,7 +150,7 @@ public:
// this index.
// NOTE A local copy of 'missingField' is made because indices may be
// invalidated during a db lock yield.
- BSONObj missingFieldObj = IndexLegacy::getMissingField(txn, collection, idx->infoObj());
+ BSONObj missingFieldObj = IndexLegacy::getMissingField(opCtx, collection, idx->infoObj());
BSONElement missingField = missingFieldObj.firstElement();
// for now, the only check is that all shard keys are filled
@@ -180,7 +180,7 @@ public:
// This is a fetch, but it's OK. The underlying code won't throw a page fault
// exception.
- BSONObj obj = collection->docFor(txn, loc).value();
+ BSONObj obj = collection->docFor(opCtx, loc).value();
BSONObjIterator j(keyPattern);
BSONElement real;
for (int x = 0; x <= k; x++)
diff --git a/src/mongo/db/s/chunk_move_write_concern_options.cpp b/src/mongo/db/s/chunk_move_write_concern_options.cpp
index 32ecdaf56dc..700f134a604 100644
--- a/src/mongo/db/s/chunk_move_write_concern_options.cpp
+++ b/src/mongo/db/s/chunk_move_write_concern_options.cpp
@@ -66,10 +66,10 @@ WriteConcernOptions getDefaultWriteConcernForMigration() {
} // namespace
StatusWith<WriteConcernOptions> ChunkMoveWriteConcernOptions::getEffectiveWriteConcern(
- OperationContext* txn, const MigrationSecondaryThrottleOptions& options) {
+ OperationContext* opCtx, const MigrationSecondaryThrottleOptions& options) {
auto secondaryThrottle = options.getSecondaryThrottle();
if (secondaryThrottle == MigrationSecondaryThrottleOptions::kDefault) {
- if (txn->getServiceContext()->getGlobalStorageEngine()->supportsDocLocking()) {
+ if (opCtx->getServiceContext()->getGlobalStorageEngine()->supportsDocLocking()) {
secondaryThrottle = MigrationSecondaryThrottleOptions::kOff;
} else {
secondaryThrottle = MigrationSecondaryThrottleOptions::kOn;
diff --git a/src/mongo/db/s/chunk_move_write_concern_options.h b/src/mongo/db/s/chunk_move_write_concern_options.h
index b9734120b2a..e3b380a8634 100644
--- a/src/mongo/db/s/chunk_move_write_concern_options.h
+++ b/src/mongo/db/s/chunk_move_write_concern_options.h
@@ -60,7 +60,7 @@ public:
* concern.
*/
static StatusWith<WriteConcernOptions> getEffectiveWriteConcern(
- OperationContext* txn, const MigrationSecondaryThrottleOptions& options);
+ OperationContext* opCtx, const MigrationSecondaryThrottleOptions& options);
};
} // namespace mongo
diff --git a/src/mongo/db/s/cleanup_orphaned_cmd.cpp b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
index 038ecbfbdeb..d6690f872ef 100644
--- a/src/mongo/db/s/cleanup_orphaned_cmd.cpp
+++ b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
@@ -72,7 +72,7 @@ enum CleanupResult { CleanupResult_Done, CleanupResult_Continue, CleanupResult_E
*
* If the collection is not sharded, returns CleanupResult_Done.
*/
-CleanupResult cleanupOrphanedData(OperationContext* txn,
+CleanupResult cleanupOrphanedData(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& startingFromKeyConst,
const WriteConcernOptions& secondaryThrottle,
@@ -82,8 +82,8 @@ CleanupResult cleanupOrphanedData(OperationContext* txn,
ScopedCollectionMetadata metadata;
{
- AutoGetCollection autoColl(txn, ns, MODE_IS);
- metadata = CollectionShardingState::get(txn, ns.toString())->getMetadata();
+ AutoGetCollection autoColl(opCtx, ns, MODE_IS);
+ metadata = CollectionShardingState::get(opCtx, ns.toString())->getMetadata();
}
if (!metadata || metadata->getKeyPattern().isEmpty()) {
@@ -132,7 +132,7 @@ CleanupResult cleanupOrphanedData(OperationContext* txn,
deleterOptions.waitForOpenCursors = true;
deleterOptions.removeSaverReason = "cleanup-cmd";
- if (!getDeleter()->deleteNow(txn, deleterOptions, errMsg)) {
+ if (!getDeleter()->deleteNow(opCtx, deleterOptions, errMsg)) {
warning() << redact(*errMsg);
return CleanupResult_Error;
}
@@ -203,7 +203,7 @@ public:
// Output
static BSONField<BSONObj> stoppedAtKeyField;
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
string const& db,
BSONObj& cmdObj,
int,
@@ -227,9 +227,9 @@ public:
const auto secondaryThrottle =
uassertStatusOK(MigrationSecondaryThrottleOptions::createFromCommand(cmdObj));
const auto writeConcern = uassertStatusOK(
- ChunkMoveWriteConcernOptions::getEffectiveWriteConcern(txn, secondaryThrottle));
+ ChunkMoveWriteConcernOptions::getEffectiveWriteConcern(opCtx, secondaryThrottle));
- ShardingState* const shardingState = ShardingState::get(txn);
+ ShardingState* const shardingState = ShardingState::get(opCtx);
if (!shardingState->enabled()) {
errmsg = str::stream() << "server is not part of a sharded cluster or "
@@ -238,7 +238,7 @@ public:
}
ChunkVersion shardVersion;
- Status status = shardingState->refreshMetadataNow(txn, nss, &shardVersion);
+ Status status = shardingState->refreshMetadataNow(opCtx, nss, &shardVersion);
if (!status.isOK()) {
if (status.code() == ErrorCodes::RemoteChangeDetected) {
warning() << "Shard version in transition detected while refreshing "
@@ -251,7 +251,7 @@ public:
BSONObj stoppedAtKey;
CleanupResult cleanupResult =
- cleanupOrphanedData(txn, nss, startingFromKey, writeConcern, &stoppedAtKey, &errmsg);
+ cleanupOrphanedData(opCtx, nss, startingFromKey, writeConcern, &stoppedAtKey, &errmsg);
if (cleanupResult == CleanupResult_Error) {
return false;
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index 778a28285d9..cdb469610bc 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -76,9 +76,9 @@ protected:
auto future = launchAsync([this] {
ON_BLOCK_EXIT([&] { Client::destroy(); });
Client::initThreadIfNotAlready("Test");
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
- auto status = MetadataLoader::makeCollectionMetadata(txn.get(),
+ auto status = MetadataLoader::makeCollectionMetadata(opCtx.get(),
catalogClient(),
"test.foo",
"shard0000",
@@ -305,9 +305,9 @@ protected:
auto future = launchAsync([this] {
ON_BLOCK_EXIT([&] { Client::destroy(); });
Client::initThreadIfNotAlready("Test");
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
- auto status = MetadataLoader::makeCollectionMetadata(txn.get(),
+ auto status = MetadataLoader::makeCollectionMetadata(opCtx.get(),
catalogClient(),
"test.foo",
"shard0000",
@@ -430,9 +430,9 @@ protected:
auto future = launchAsync([this] {
ON_BLOCK_EXIT([&] { Client::destroy(); });
Client::initThreadIfNotAlready("Test");
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
- auto status = MetadataLoader::makeCollectionMetadata(txn.get(),
+ auto status = MetadataLoader::makeCollectionMetadata(opCtx.get(),
catalogClient(),
"test.foo",
"shard0000",
@@ -509,9 +509,9 @@ protected:
auto future = launchAsync([this] {
ON_BLOCK_EXIT([&] { Client::destroy(); });
Client::initThreadIfNotAlready("Test");
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
- auto status = MetadataLoader::makeCollectionMetadata(txn.get(),
+ auto status = MetadataLoader::makeCollectionMetadata(opCtx.get(),
catalogClient(),
"test.foo",
"shard0000",
@@ -633,9 +633,9 @@ protected:
auto future = launchAsync([this] {
ON_BLOCK_EXIT([&] { Client::destroy(); });
Client::initThreadIfNotAlready("Test");
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
- auto status = MetadataLoader::makeCollectionMetadata(txn.get(),
+ auto status = MetadataLoader::makeCollectionMetadata(opCtx.get(),
catalogClient(),
"test.foo",
"shard0000",
diff --git a/src/mongo/db/s/collection_range_deleter.cpp b/src/mongo/db/s/collection_range_deleter.cpp
index 30855a9b210..2349eb4a3bd 100644
--- a/src/mongo/db/s/collection_range_deleter.cpp
+++ b/src/mongo/db/s/collection_range_deleter.cpp
@@ -75,29 +75,29 @@ CollectionRangeDeleter::CollectionRangeDeleter(NamespaceString nss) : _nss(std::
void CollectionRangeDeleter::run() {
Client::initThread(getThreadName().c_str());
ON_BLOCK_EXIT([&] { Client::destroy(); });
- auto txn = cc().makeOperationContext().get();
+ auto opCtx = cc().makeOperationContext().get();
const int maxToDelete = std::max(int(internalQueryExecYieldIterations.load()), 1);
- bool hasNextRangeToClean = cleanupNextRange(txn, maxToDelete);
+ bool hasNextRangeToClean = cleanupNextRange(opCtx, maxToDelete);
// If there are more ranges to run, we add <this> back onto the task executor to run again.
if (hasNextRangeToClean) {
- auto executor = ShardingState::get(txn)->getRangeDeleterTaskExecutor();
+ auto executor = ShardingState::get(opCtx)->getRangeDeleterTaskExecutor();
executor->scheduleWork([this](const CallbackArgs& cbArgs) { run(); });
} else {
delete this;
}
}
-bool CollectionRangeDeleter::cleanupNextRange(OperationContext* txn, int maxToDelete) {
+bool CollectionRangeDeleter::cleanupNextRange(OperationContext* opCtx, int maxToDelete) {
{
- AutoGetCollection autoColl(txn, _nss, MODE_IX);
+ AutoGetCollection autoColl(opCtx, _nss, MODE_IX);
auto* collection = autoColl.getCollection();
if (!collection) {
return false;
}
- auto* collectionShardingState = CollectionShardingState::get(txn, _nss);
+ auto* collectionShardingState = CollectionShardingState::get(opCtx, _nss);
dassert(collectionShardingState != nullptr); // every collection gets one
auto& metadataManager = collectionShardingState->_metadataManager;
@@ -117,7 +117,7 @@ bool CollectionRangeDeleter::cleanupNextRange(OperationContext* txn, int maxToDe
auto scopedCollectionMetadata = collectionShardingState->getMetadata();
int numDocumentsDeleted =
- _doDeletion(txn, collection, scopedCollectionMetadata->getKeyPattern(), maxToDelete);
+ _doDeletion(opCtx, collection, scopedCollectionMetadata->getKeyPattern(), maxToDelete);
if (numDocumentsDeleted <= 0) {
metadataManager.removeRangeToClean(_rangeInProgress.get());
_rangeInProgress = boost::none;
@@ -127,8 +127,9 @@ bool CollectionRangeDeleter::cleanupNextRange(OperationContext* txn, int maxToDe
// wait for replication
WriteConcernResult wcResult;
- auto currentClientOpTime = repl::ReplClientInfo::forClient(txn->getClient()).getLastOp();
- Status status = waitForWriteConcern(txn, currentClientOpTime, kMajorityWriteConcern, &wcResult);
+ auto currentClientOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
+ Status status =
+ waitForWriteConcern(opCtx, currentClientOpTime, kMajorityWriteConcern, &wcResult);
if (!status.isOK()) {
warning() << "Error when waiting for write concern after removing chunks in " << _nss
<< " : " << status.reason();
@@ -137,7 +138,7 @@ bool CollectionRangeDeleter::cleanupNextRange(OperationContext* txn, int maxToDe
return true;
}
-int CollectionRangeDeleter::_doDeletion(OperationContext* txn,
+int CollectionRangeDeleter::_doDeletion(OperationContext* opCtx,
Collection* collection,
const BSONObj& keyPattern,
int maxToDelete) {
@@ -147,7 +148,7 @@ int CollectionRangeDeleter::_doDeletion(OperationContext* txn,
// The IndexChunk has a keyPattern that may apply to more than one index - we need to
// select the index and get the full index keyPattern here.
const IndexDescriptor* idx =
- collection->getIndexCatalog()->findShardKeyPrefixedIndex(txn, keyPattern, false);
+ collection->getIndexCatalog()->findShardKeyPrefixedIndex(opCtx, keyPattern, false);
if (idx == NULL) {
warning() << "Unable to find shard key index for " << keyPattern.toString() << " in "
<< _nss;
@@ -165,7 +166,7 @@ int CollectionRangeDeleter::_doDeletion(OperationContext* txn,
LOG(1) << "begin removal of " << min << " to " << max << " in " << _nss;
auto indexName = idx->indexName();
- IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByName(txn, indexName);
+ IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByName(opCtx, indexName);
if (!desc) {
warning() << "shard key index with name " << indexName << " on '" << _nss
<< "' was dropped";
@@ -174,7 +175,7 @@ int CollectionRangeDeleter::_doDeletion(OperationContext* txn,
int numDeleted = 0;
do {
- auto exec = InternalPlanner::indexScan(txn,
+ auto exec = InternalPlanner::indexScan(opCtx,
collection,
desc,
min,
@@ -198,14 +199,14 @@ int CollectionRangeDeleter::_doDeletion(OperationContext* txn,
}
invariant(PlanExecutor::ADVANCED == state);
- WriteUnitOfWork wuow(txn);
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, _nss)) {
+ WriteUnitOfWork wuow(opCtx);
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, _nss)) {
warning() << "stepped down from primary while deleting chunk; orphaning data in "
<< _nss << " in range [" << min << ", " << max << ")";
break;
}
OpDebug* const nullOpDebug = nullptr;
- collection->deleteDocument(txn, rloc, nullOpDebug, true);
+ collection->deleteDocument(opCtx, rloc, nullOpDebug, true);
wuow.commit();
} while (++numDeleted < maxToDelete);
return numDeleted;
diff --git a/src/mongo/db/s/collection_range_deleter.h b/src/mongo/db/s/collection_range_deleter.h
index 4cb52d1ee3f..f611215a73d 100644
--- a/src/mongo/db/s/collection_range_deleter.h
+++ b/src/mongo/db/s/collection_range_deleter.h
@@ -56,7 +56,7 @@ public:
* Returns true if there are more entries in rangesToClean, false if there is no more progress
* to be made.
*/
- bool cleanupNextRange(OperationContext* txn, int maxToDelete);
+ bool cleanupNextRange(OperationContext* opCtx, int maxToDelete);
private:
/**
@@ -65,7 +65,7 @@ private:
*
* Returns the number of documents deleted (0 if deletion is finished), or -1 for error.
*/
- int _doDeletion(OperationContext* txn,
+ int _doDeletion(OperationContext* opCtx,
Collection* collection,
const BSONObj& keyPattern,
int maxToDelete);
diff --git a/src/mongo/db/s/collection_sharding_state.cpp b/src/mongo/db/s/collection_sharding_state.cpp
index e35c94e6352..24746d7880e 100644
--- a/src/mongo/db/s/collection_sharding_state.cpp
+++ b/src/mongo/db/s/collection_sharding_state.cpp
@@ -66,18 +66,18 @@ using std::string;
*/
class ShardIdentityLogOpHandler final : public RecoveryUnit::Change {
public:
- ShardIdentityLogOpHandler(OperationContext* txn, ShardIdentityType shardIdentity)
- : _txn(txn), _shardIdentity(std::move(shardIdentity)) {}
+ ShardIdentityLogOpHandler(OperationContext* opCtx, ShardIdentityType shardIdentity)
+ : _opCtx(opCtx), _shardIdentity(std::move(shardIdentity)) {}
void commit() override {
- fassertNoTrace(40071,
- ShardingState::get(_txn)->initializeFromShardIdentity(_txn, _shardIdentity));
+ fassertNoTrace(
+ 40071, ShardingState::get(_opCtx)->initializeFromShardIdentity(_opCtx, _shardIdentity));
}
void rollback() override {}
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
const ShardIdentityType _shardIdentity;
};
@@ -90,27 +90,27 @@ CollectionShardingState::~CollectionShardingState() {
invariant(!_sourceMgr);
}
-CollectionShardingState* CollectionShardingState::get(OperationContext* txn,
+CollectionShardingState* CollectionShardingState::get(OperationContext* opCtx,
const NamespaceString& nss) {
- return CollectionShardingState::get(txn, nss.ns());
+ return CollectionShardingState::get(opCtx, nss.ns());
}
-CollectionShardingState* CollectionShardingState::get(OperationContext* txn,
+CollectionShardingState* CollectionShardingState::get(OperationContext* opCtx,
const std::string& ns) {
// Collection lock must be held to have a reference to the collection's sharding state
- dassert(txn->lockState()->isCollectionLockedForMode(ns, MODE_IS));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(ns, MODE_IS));
- ShardingState* const shardingState = ShardingState::get(txn);
- return shardingState->getNS(ns, txn);
+ ShardingState* const shardingState = ShardingState::get(opCtx);
+ return shardingState->getNS(ns, opCtx);
}
ScopedCollectionMetadata CollectionShardingState::getMetadata() {
return _metadataManager.getActiveMetadata();
}
-void CollectionShardingState::refreshMetadata(OperationContext* txn,
+void CollectionShardingState::refreshMetadata(OperationContext* opCtx,
std::unique_ptr<CollectionMetadata> newMetadata) {
- invariant(txn->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_X));
_metadataManager.refreshActiveMetadata(std::move(newMetadata));
}
@@ -131,27 +131,27 @@ MigrationSourceManager* CollectionShardingState::getMigrationSourceManager() {
return _sourceMgr;
}
-void CollectionShardingState::setMigrationSourceManager(OperationContext* txn,
+void CollectionShardingState::setMigrationSourceManager(OperationContext* opCtx,
MigrationSourceManager* sourceMgr) {
- invariant(txn->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_X));
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_X));
invariant(sourceMgr);
invariant(!_sourceMgr);
_sourceMgr = sourceMgr;
}
-void CollectionShardingState::clearMigrationSourceManager(OperationContext* txn) {
- invariant(txn->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_X));
+void CollectionShardingState::clearMigrationSourceManager(OperationContext* opCtx) {
+ invariant(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_X));
invariant(_sourceMgr);
_sourceMgr = nullptr;
}
-void CollectionShardingState::checkShardVersionOrThrow(OperationContext* txn) {
+void CollectionShardingState::checkShardVersionOrThrow(OperationContext* opCtx) {
string errmsg;
ChunkVersion received;
ChunkVersion wanted;
- if (!_checkShardVersionOk(txn, &errmsg, &received, &wanted)) {
+ if (!_checkShardVersionOk(opCtx, &errmsg, &received, &wanted)) {
throw SendStaleConfigException(
_nss.ns(),
str::stream() << "[" << _nss.ns() << "] shard version not ok: " << errmsg,
@@ -172,19 +172,19 @@ bool CollectionShardingState::collectionIsSharded() {
return true;
}
-bool CollectionShardingState::isDocumentInMigratingChunk(OperationContext* txn,
+bool CollectionShardingState::isDocumentInMigratingChunk(OperationContext* opCtx,
const BSONObj& doc) {
- dassert(txn->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
if (_sourceMgr) {
- return _sourceMgr->getCloner()->isDocumentInMigratingChunk(txn, doc);
+ return _sourceMgr->getCloner()->isDocumentInMigratingChunk(opCtx, doc);
}
return false;
}
-void CollectionShardingState::onInsertOp(OperationContext* txn, const BSONObj& insertedDoc) {
- dassert(txn->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
+void CollectionShardingState::onInsertOp(OperationContext* opCtx, const BSONObj& insertedDoc) {
+ dassert(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
if (serverGlobalParams.clusterRole == ClusterRole::ShardServer &&
_nss == NamespaceString::kConfigCollectionNamespace) {
@@ -192,32 +192,32 @@ void CollectionShardingState::onInsertOp(OperationContext* txn, const BSONObj& i
if (idElem.str() == ShardIdentityType::IdName) {
auto shardIdentityDoc = uassertStatusOK(ShardIdentityType::fromBSON(insertedDoc));
uassertStatusOK(shardIdentityDoc.validate());
- txn->recoveryUnit()->registerChange(
- new ShardIdentityLogOpHandler(txn, std::move(shardIdentityDoc)));
+ opCtx->recoveryUnit()->registerChange(
+ new ShardIdentityLogOpHandler(opCtx, std::move(shardIdentityDoc)));
}
}
}
- checkShardVersionOrThrow(txn);
+ checkShardVersionOrThrow(opCtx);
if (_sourceMgr) {
- _sourceMgr->getCloner()->onInsertOp(txn, insertedDoc);
+ _sourceMgr->getCloner()->onInsertOp(opCtx, insertedDoc);
}
}
-void CollectionShardingState::onUpdateOp(OperationContext* txn, const BSONObj& updatedDoc) {
- dassert(txn->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
+void CollectionShardingState::onUpdateOp(OperationContext* opCtx, const BSONObj& updatedDoc) {
+ dassert(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
- checkShardVersionOrThrow(txn);
+ checkShardVersionOrThrow(opCtx);
if (_sourceMgr) {
- _sourceMgr->getCloner()->onUpdateOp(txn, updatedDoc);
+ _sourceMgr->getCloner()->onUpdateOp(opCtx, updatedDoc);
}
}
-void CollectionShardingState::onDeleteOp(OperationContext* txn,
+void CollectionShardingState::onDeleteOp(OperationContext* opCtx,
const CollectionShardingState::DeleteState& deleteState) {
- dassert(txn->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
if (serverGlobalParams.clusterRole == ClusterRole::ShardServer &&
_nss == NamespaceString::kConfigCollectionNamespace) {
@@ -225,13 +225,13 @@ void CollectionShardingState::onDeleteOp(OperationContext* txn,
if (auto idElem = deleteState.idDoc["_id"]) {
auto idStr = idElem.str();
if (idStr == ShardIdentityType::IdName) {
- if (!repl::ReplicationCoordinator::get(txn)->getMemberState().rollback()) {
+ if (!repl::ReplicationCoordinator::get(opCtx)->getMemberState().rollback()) {
uasserted(40070,
"cannot delete shardIdentity document while in --shardsvr mode");
} else {
warning() << "Shard identity document rolled back. Will shut down after "
"finishing rollback.";
- ShardIdentityRollbackNotifier::get(txn)->recordThatRollbackHappened();
+ ShardIdentityRollbackNotifier::get(opCtx)->recordThatRollbackHappened();
}
}
}
@@ -239,70 +239,74 @@ void CollectionShardingState::onDeleteOp(OperationContext* txn,
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
if (_nss == VersionType::ConfigNS) {
- if (!repl::ReplicationCoordinator::get(txn)->getMemberState().rollback()) {
+ if (!repl::ReplicationCoordinator::get(opCtx)->getMemberState().rollback()) {
uasserted(40302, "cannot delete config.version document while in --configsvr mode");
} else {
// Throw out any cached information related to the cluster ID.
- Grid::get(txn)->catalogManager()->discardCachedConfigDatabaseInitializationState();
- ClusterIdentityLoader::get(txn)->discardCachedClusterId();
+ Grid::get(opCtx)
+ ->catalogManager()
+ ->discardCachedConfigDatabaseInitializationState();
+ ClusterIdentityLoader::get(opCtx)->discardCachedClusterId();
}
}
}
- checkShardVersionOrThrow(txn);
+ checkShardVersionOrThrow(opCtx);
if (_sourceMgr && deleteState.isMigrating) {
- _sourceMgr->getCloner()->onDeleteOp(txn, deleteState.idDoc);
+ _sourceMgr->getCloner()->onDeleteOp(opCtx, deleteState.idDoc);
}
}
-void CollectionShardingState::onDropCollection(OperationContext* txn,
+void CollectionShardingState::onDropCollection(OperationContext* opCtx,
const NamespaceString& collectionName) {
- dassert(txn->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
if (serverGlobalParams.clusterRole == ClusterRole::ShardServer &&
_nss == NamespaceString::kConfigCollectionNamespace) {
// Dropping system collections is not allowed for end users.
- invariant(!txn->writesAreReplicated());
- invariant(repl::ReplicationCoordinator::get(txn)->getMemberState().rollback());
+ invariant(!opCtx->writesAreReplicated());
+ invariant(repl::ReplicationCoordinator::get(opCtx)->getMemberState().rollback());
// Can't confirm whether there was a ShardIdentity document or not yet, so assume there was
// one and shut down the process to clear the in-memory sharding state.
warning() << "admin.system.version collection rolled back. Will shut down after "
"finishing rollback";
- ShardIdentityRollbackNotifier::get(txn)->recordThatRollbackHappened();
+ ShardIdentityRollbackNotifier::get(opCtx)->recordThatRollbackHappened();
}
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
if (_nss == VersionType::ConfigNS) {
- if (!repl::ReplicationCoordinator::get(txn)->getMemberState().rollback()) {
+ if (!repl::ReplicationCoordinator::get(opCtx)->getMemberState().rollback()) {
uasserted(40303, "cannot drop config.version document while in --configsvr mode");
} else {
// Throw out any cached information related to the cluster ID.
- Grid::get(txn)->catalogManager()->discardCachedConfigDatabaseInitializationState();
- ClusterIdentityLoader::get(txn)->discardCachedClusterId();
+ Grid::get(opCtx)
+ ->catalogManager()
+ ->discardCachedConfigDatabaseInitializationState();
+ ClusterIdentityLoader::get(opCtx)->discardCachedClusterId();
}
}
}
}
-bool CollectionShardingState::_checkShardVersionOk(OperationContext* txn,
+bool CollectionShardingState::_checkShardVersionOk(OperationContext* opCtx,
string* errmsg,
ChunkVersion* expectedShardVersion,
ChunkVersion* actualShardVersion) {
- Client* client = txn->getClient();
+ Client* client = opCtx->getClient();
// Operations using the DBDirectClient are unversioned.
if (client->isInDirectClient()) {
return true;
}
- if (!repl::ReplicationCoordinator::get(txn)->canAcceptWritesForDatabase(txn, _nss.db())) {
+ if (!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(opCtx, _nss.db())) {
// Right now connections to secondaries aren't versioned at all.
return true;
}
- const auto& oss = OperationShardingState::get(txn);
+ const auto& oss = OperationShardingState::get(opCtx);
// If there is a version attached to the OperationContext, use it as the received version.
// Otherwise, get the received version from the ShardedConnectionInfo.
@@ -311,8 +315,9 @@ bool CollectionShardingState::_checkShardVersionOk(OperationContext* txn,
} else {
ShardedConnectionInfo* info = ShardedConnectionInfo::get(client, false);
if (!info) {
- // There is no shard version information on either 'txn' or 'client'. This means that
- // the operation represented by 'txn' is unversioned, and the shard version is always OK
+ // There is no shard version information on either 'opCtx' or 'client'. This means that
+ // the operation represented by 'opCtx' is unversioned, and the shard version is always
+ // OK
// for unversioned operations.
return true;
}
@@ -333,7 +338,7 @@ bool CollectionShardingState::_checkShardVersionOk(OperationContext* txn,
// Set migration critical section on operation sharding state: operation will wait for the
// migration to finish before returning failure and retrying.
- OperationShardingState::get(txn).setMigrationCriticalSectionSignal(
+ OperationShardingState::get(opCtx).setMigrationCriticalSectionSignal(
_sourceMgr->getMigrationCriticalSectionSignal());
return false;
}
diff --git a/src/mongo/db/s/collection_sharding_state.h b/src/mongo/db/s/collection_sharding_state.h
index 65e7bf6a882..5bbc2b9c576 100644
--- a/src/mongo/db/s/collection_sharding_state.h
+++ b/src/mongo/db/s/collection_sharding_state.h
@@ -81,8 +81,8 @@ public:
* Must be called with some lock held on the specific collection being looked up and the
* returned pointer should never be stored.
*/
- static CollectionShardingState* get(OperationContext* txn, const NamespaceString& nss);
- static CollectionShardingState* get(OperationContext* txn, const std::string& ns);
+ static CollectionShardingState* get(OperationContext* opCtx, const NamespaceString& nss);
+ static CollectionShardingState* get(OperationContext* opCtx, const std::string& ns);
/**
* Returns the chunk metadata for the collection.
@@ -96,7 +96,7 @@ public:
*
* Must always be called with an exclusive collection lock.
*/
- void refreshMetadata(OperationContext* txn, std::unique_ptr<CollectionMetadata> newMetadata);
+ void refreshMetadata(OperationContext* opCtx, std::unique_ptr<CollectionMetadata> newMetadata);
/**
* Marks the collection as not sharded at stepdown time so that no filtering will occur for
@@ -128,14 +128,14 @@ public:
* collection X lock. May not be called if there is a migration source manager already
* installed. Must be followed by a call to clearMigrationSourceManager.
*/
- void setMigrationSourceManager(OperationContext* txn, MigrationSourceManager* sourceMgr);
+ void setMigrationSourceManager(OperationContext* opCtx, MigrationSourceManager* sourceMgr);
/**
* Removes a migration source manager from this collection's sharding state. Must be called with
* collection X lock. May not be called if there isn't a migration source manager installed
* already through a previous call to setMigrationSourceManager.
*/
- void clearMigrationSourceManager(OperationContext* txn);
+ void clearMigrationSourceManager(OperationContext* opCtx);
/**
* Checks whether the shard version in the context is compatible with the shard version of the
@@ -146,7 +146,7 @@ public:
* response is constructed, this function should be the only means of checking for shard version
* match.
*/
- void checkShardVersionOrThrow(OperationContext* txn);
+ void checkShardVersionOrThrow(OperationContext* opCtx);
/**
* Returns whether this collection is sharded. Valid only if mongoD is primary.
@@ -157,15 +157,15 @@ public:
// Replication subsystem hooks. If this collection is serving as a source for migration, these
// methods inform it of any changes to its contents.
- bool isDocumentInMigratingChunk(OperationContext* txn, const BSONObj& doc);
+ bool isDocumentInMigratingChunk(OperationContext* opCtx, const BSONObj& doc);
- void onInsertOp(OperationContext* txn, const BSONObj& insertedDoc);
+ void onInsertOp(OperationContext* opCtx, const BSONObj& insertedDoc);
- void onUpdateOp(OperationContext* txn, const BSONObj& updatedDoc);
+ void onUpdateOp(OperationContext* opCtx, const BSONObj& updatedDoc);
- void onDeleteOp(OperationContext* txn, const DeleteState& deleteState);
+ void onDeleteOp(OperationContext* opCtx, const DeleteState& deleteState);
- void onDropCollection(OperationContext* txn, const NamespaceString& collectionName);
+ void onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName);
MetadataManager* getMetadataManagerForTest() {
return &_metadataManager;
@@ -176,7 +176,7 @@ private:
/**
* Checks whether the shard version of the operation matches that of the collection.
*
- * txn - Operation context from which to retrieve the operation's expected version.
+ * opCtx - Operation context from which to retrieve the operation's expected version.
* errmsg (out) - On false return contains an explanatory error message.
* expectedShardVersion (out) - On false return contains the expected collection version on this
* shard. Obtained from the operation sharding state.
@@ -186,7 +186,7 @@ private:
* Returns true if the expected collection version on the shard matches its actual version on
* the shard and false otherwise. Upon false return, the output parameters will be set.
*/
- bool _checkShardVersionOk(OperationContext* txn,
+ bool _checkShardVersionOk(OperationContext* opCtx,
std::string* errmsg,
ChunkVersion* expectedShardVersion,
ChunkVersion* actualShardVersion);
diff --git a/src/mongo/db/s/collection_sharding_state_test.cpp b/src/mongo/db/s/collection_sharding_state_test.cpp
index 8a7ca715141..51666a7800f 100644
--- a/src/mongo/db/s/collection_sharding_state_test.cpp
+++ b/src/mongo/db/s/collection_sharding_state_test.cpp
@@ -61,7 +61,7 @@ public:
// Note: this assumes that globalInit will always be called on the same thread as the main
// test thread.
- ShardingState::get(txn())->setGlobalInitMethodForTest(
+ ShardingState::get(opCtx())->setGlobalInitMethodForTest(
[this](OperationContext*, const ConnectionString&, StringData) {
_initCallCount++;
return Status::OK();
@@ -70,7 +70,7 @@ public:
void tearDown() override {}
- OperationContext* txn() {
+ OperationContext* opCtx() {
return _opCtx.get();
}
@@ -102,8 +102,8 @@ TEST_F(CollShardingStateTest, GlobalInitGetsCalledAfterWriteCommits) {
shardIdentity.setShardName("a");
shardIdentity.setClusterId(OID::gen());
- WriteUnitOfWork wuow(txn());
- collShardingState.onInsertOp(txn(), shardIdentity.toBSON());
+ WriteUnitOfWork wuow(opCtx());
+ collShardingState.onInsertOp(opCtx(), shardIdentity.toBSON());
ASSERT_EQ(0, getInitCallCount());
@@ -123,8 +123,8 @@ TEST_F(CollShardingStateTest, GlobalInitDoesntGetCalledIfWriteAborts) {
shardIdentity.setClusterId(OID::gen());
{
- WriteUnitOfWork wuow(txn());
- collShardingState.onInsertOp(txn(), shardIdentity.toBSON());
+ WriteUnitOfWork wuow(opCtx());
+ collShardingState.onInsertOp(opCtx(), shardIdentity.toBSON());
ASSERT_EQ(0, getInitCallCount());
}
@@ -141,8 +141,8 @@ TEST_F(CollShardingStateTest, GlobalInitDoesntGetsCalledIfNSIsNotForShardIdentit
shardIdentity.setShardName("a");
shardIdentity.setClusterId(OID::gen());
- WriteUnitOfWork wuow(txn());
- collShardingState.onInsertOp(txn(), shardIdentity.toBSON());
+ WriteUnitOfWork wuow(opCtx());
+ collShardingState.onInsertOp(opCtx(), shardIdentity.toBSON());
ASSERT_EQ(0, getInitCallCount());
@@ -158,15 +158,16 @@ TEST_F(CollShardingStateTest, OnInsertOpThrowWithIncompleteShardIdentityDocument
ShardIdentityType shardIdentity;
shardIdentity.setShardName("a");
- ASSERT_THROWS(collShardingState.onInsertOp(txn(), shardIdentity.toBSON()), AssertionException);
+ ASSERT_THROWS(collShardingState.onInsertOp(opCtx(), shardIdentity.toBSON()),
+ AssertionException);
}
TEST_F(CollShardingStateTest, GlobalInitDoesntGetsCalledIfShardIdentityDocWasNotInserted) {
CollectionShardingState collShardingState(getServiceContext(),
NamespaceString::kConfigCollectionNamespace);
- WriteUnitOfWork wuow(txn());
- collShardingState.onInsertOp(txn(), BSON("_id" << 1));
+ WriteUnitOfWork wuow(opCtx());
+ collShardingState.onInsertOp(opCtx(), BSON("_id" << 1));
ASSERT_EQ(0, getInitCallCount());
diff --git a/src/mongo/db/s/config/configsvr_add_shard_command.cpp b/src/mongo/db/s/config/configsvr_add_shard_command.cpp
index 9f9b349b4df..5cfc614a816 100644
--- a/src/mongo/db/s/config/configsvr_add_shard_command.cpp
+++ b/src/mongo/db/s/config/configsvr_add_shard_command.cpp
@@ -86,7 +86,7 @@ public:
return Status::OK();
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& unusedDbName,
BSONObj& cmdObj,
int options,
@@ -105,7 +105,7 @@ public:
}
auto parsedRequest = std::move(swParsedRequest.getValue());
- auto replCoord = repl::ReplicationCoordinator::get(txn);
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
auto rsConfig = replCoord->getConfig();
auto validationStatus = parsedRequest.validate(rsConfig.isLocalHostAllowed());
@@ -119,8 +119,8 @@ public:
parsedRequest.hasMaxSize() ? parsedRequest.getMaxSize()
: kMaxSizeMBDefault);
- StatusWith<string> addShardResult = Grid::get(txn)->catalogManager()->addShard(
- txn,
+ StatusWith<string> addShardResult = Grid::get(opCtx)->catalogManager()->addShard(
+ opCtx,
parsedRequest.hasName() ? &parsedRequest.getName() : nullptr,
parsedRequest.getConnString(),
parsedRequest.hasMaxSize() ? parsedRequest.getMaxSize() : kMaxSizeMBDefault);
diff --git a/src/mongo/db/s/config/configsvr_add_shard_to_zone_command.cpp b/src/mongo/db/s/config/configsvr_add_shard_to_zone_command.cpp
index 1b0a3db4148..236b2409af1 100644
--- a/src/mongo/db/s/config/configsvr_add_shard_to_zone_command.cpp
+++ b/src/mongo/db/s/config/configsvr_add_shard_to_zone_command.cpp
@@ -87,7 +87,7 @@ public:
return Status::OK();
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& unusedDbName,
BSONObj& cmdObj,
int options,
@@ -100,8 +100,8 @@ public:
auto parsedRequest = uassertStatusOK(AddShardToZoneRequest::parseFromConfigCommand(cmdObj));
- uassertStatusOK(Grid::get(txn)->catalogManager()->addShardToZone(
- txn, parsedRequest.getShardName(), parsedRequest.getZoneName()));
+ uassertStatusOK(Grid::get(opCtx)->catalogManager()->addShardToZone(
+ opCtx, parsedRequest.getShardName(), parsedRequest.getZoneName()));
return true;
}
diff --git a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
index 6e0f96328ee..5144be21703 100644
--- a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
+++ b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
@@ -117,7 +117,7 @@ public:
return parseNsFullyQualified(dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbName,
BSONObj& cmdObj,
int options,
@@ -129,8 +129,8 @@ public:
auto commitRequest =
uassertStatusOK(CommitChunkMigrationRequest::createFromCommand(nss, cmdObj));
- StatusWith<BSONObj> response = Grid::get(txn)->catalogManager()->commitChunkMigration(
- txn,
+ StatusWith<BSONObj> response = Grid::get(opCtx)->catalogManager()->commitChunkMigration(
+ opCtx,
nss,
commitRequest.getMigratedChunk(),
commitRequest.getControlChunk(),
diff --git a/src/mongo/db/s/config/configsvr_control_balancer_command.cpp b/src/mongo/db/s/config/configsvr_control_balancer_command.cpp
index 88deff30a9e..f4905406f78 100644
--- a/src/mongo/db/s/config/configsvr_control_balancer_command.cpp
+++ b/src/mongo/db/s/config/configsvr_control_balancer_command.cpp
@@ -72,7 +72,7 @@ public:
return Status::OK();
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& unusedDbName,
BSONObj& cmdObj,
int options,
@@ -87,13 +87,13 @@ public:
str::stream() << getName() << " can only be run on config servers",
serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
- _run(txn, &result);
+ _run(opCtx, &result);
return true;
}
private:
- virtual void _run(OperationContext* txn, BSONObjBuilder* result) = 0;
+ virtual void _run(OperationContext* opCtx, BSONObjBuilder* result) = 0;
};
class ConfigSvrBalancerStartCommand : public ConfigSvrBalancerControlCommand {
@@ -101,9 +101,9 @@ public:
ConfigSvrBalancerStartCommand() : ConfigSvrBalancerControlCommand("_configsvrBalancerStart") {}
private:
- void _run(OperationContext* txn, BSONObjBuilder* result) override {
- uassertStatusOK(Grid::get(txn)->getBalancerConfiguration()->setBalancerMode(
- txn, BalancerSettingsType::kFull));
+ void _run(OperationContext* opCtx, BSONObjBuilder* result) override {
+ uassertStatusOK(Grid::get(opCtx)->getBalancerConfiguration()->setBalancerMode(
+ opCtx, BalancerSettingsType::kFull));
}
};
@@ -112,10 +112,10 @@ public:
ConfigSvrBalancerStopCommand() : ConfigSvrBalancerControlCommand("_configsvrBalancerStop") {}
private:
- void _run(OperationContext* txn, BSONObjBuilder* result) override {
- uassertStatusOK(Grid::get(txn)->getBalancerConfiguration()->setBalancerMode(
- txn, BalancerSettingsType::kOff));
- Balancer::get(txn)->joinCurrentRound(txn);
+ void _run(OperationContext* opCtx, BSONObjBuilder* result) override {
+ uassertStatusOK(Grid::get(opCtx)->getBalancerConfiguration()->setBalancerMode(
+ opCtx, BalancerSettingsType::kOff));
+ Balancer::get(opCtx)->joinCurrentRound(opCtx);
}
};
@@ -125,8 +125,8 @@ public:
: ConfigSvrBalancerControlCommand("_configsvrBalancerStatus") {}
private:
- void _run(OperationContext* txn, BSONObjBuilder* result) override {
- Balancer::get(txn)->report(txn, result);
+ void _run(OperationContext* opCtx, BSONObjBuilder* result) override {
+ Balancer::get(opCtx)->report(opCtx, result);
}
};
diff --git a/src/mongo/db/s/config/configsvr_merge_chunk_command.cpp b/src/mongo/db/s/config/configsvr_merge_chunk_command.cpp
index 8988a5e4e4e..7d4dfc12b87 100644
--- a/src/mongo/db/s/config/configsvr_merge_chunk_command.cpp
+++ b/src/mongo/db/s/config/configsvr_merge_chunk_command.cpp
@@ -98,7 +98,7 @@ public:
return parseNsFullyQualified(dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbName,
BSONObj& cmdObj,
int options,
@@ -112,11 +112,11 @@ public:
auto parsedRequest = uassertStatusOK(MergeChunkRequest::parseFromConfigCommand(cmdObj));
Status mergeChunkResult =
- Grid::get(txn)->catalogManager()->commitChunkMerge(txn,
- parsedRequest.getNamespace(),
- parsedRequest.getEpoch(),
- parsedRequest.getChunkBoundaries(),
- parsedRequest.getShardName());
+ Grid::get(opCtx)->catalogManager()->commitChunkMerge(opCtx,
+ parsedRequest.getNamespace(),
+ parsedRequest.getEpoch(),
+ parsedRequest.getChunkBoundaries(),
+ parsedRequest.getShardName());
if (!mergeChunkResult.isOK()) {
return appendCommandStatus(result, mergeChunkResult);
diff --git a/src/mongo/db/s/config/configsvr_move_chunk_command.cpp b/src/mongo/db/s/config/configsvr_move_chunk_command.cpp
index 0e64207a217..8b4fe32025e 100644
--- a/src/mongo/db/s/config/configsvr_move_chunk_command.cpp
+++ b/src/mongo/db/s/config/configsvr_move_chunk_command.cpp
@@ -78,7 +78,7 @@ public:
return Status::OK();
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& unusedDbName,
BSONObj& cmdObj,
int options,
@@ -87,14 +87,14 @@ public:
auto request = uassertStatusOK(BalanceChunkRequest::parseFromConfigCommand(cmdObj));
if (request.hasToShardId()) {
- uassertStatusOK(Balancer::get(txn)->moveSingleChunk(txn,
- request.getChunk(),
- request.getToShardId(),
- request.getMaxChunkSizeBytes(),
- request.getSecondaryThrottle(),
- request.getWaitForDelete()));
+ uassertStatusOK(Balancer::get(opCtx)->moveSingleChunk(opCtx,
+ request.getChunk(),
+ request.getToShardId(),
+ request.getMaxChunkSizeBytes(),
+ request.getSecondaryThrottle(),
+ request.getWaitForDelete()));
} else {
- uassertStatusOK(Balancer::get(txn)->rebalanceSingleChunk(txn, request.getChunk()));
+ uassertStatusOK(Balancer::get(opCtx)->rebalanceSingleChunk(opCtx, request.getChunk()));
}
return true;
diff --git a/src/mongo/db/s/config/configsvr_remove_shard_from_zone_command.cpp b/src/mongo/db/s/config/configsvr_remove_shard_from_zone_command.cpp
index 376bf5cfd5f..ae83006e471 100644
--- a/src/mongo/db/s/config/configsvr_remove_shard_from_zone_command.cpp
+++ b/src/mongo/db/s/config/configsvr_remove_shard_from_zone_command.cpp
@@ -87,7 +87,7 @@ public:
return Status::OK();
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& unusedDbName,
BSONObj& cmdObj,
int options,
@@ -101,8 +101,8 @@ public:
auto parsedRequest =
uassertStatusOK(RemoveShardFromZoneRequest::parseFromConfigCommand(cmdObj));
- uassertStatusOK(Grid::get(txn)->catalogManager()->removeShardFromZone(
- txn, parsedRequest.getShardName(), parsedRequest.getZoneName()));
+ uassertStatusOK(Grid::get(opCtx)->catalogManager()->removeShardFromZone(
+ opCtx, parsedRequest.getShardName(), parsedRequest.getZoneName()));
return true;
}
diff --git a/src/mongo/db/s/config/configsvr_set_feature_compatibility_version_command.cpp b/src/mongo/db/s/config/configsvr_set_feature_compatibility_version_command.cpp
index 694cec5f96c..8e1fa7825e8 100644
--- a/src/mongo/db/s/config/configsvr_set_feature_compatibility_version_command.cpp
+++ b/src/mongo/db/s/config/configsvr_set_feature_compatibility_version_command.cpp
@@ -81,7 +81,7 @@ public:
return Status::OK();
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& unusedDbName,
BSONObj& cmdObj,
int options,
@@ -97,11 +97,11 @@ public:
serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
// Forward to all shards.
- uassertStatusOK(
- Grid::get(txn)->catalogManager()->setFeatureCompatibilityVersionOnShards(txn, version));
+ uassertStatusOK(Grid::get(opCtx)->catalogManager()->setFeatureCompatibilityVersionOnShards(
+ opCtx, version));
// On success, set featureCompatibilityVersion on self.
- FeatureCompatibilityVersion::set(txn, version);
+ FeatureCompatibilityVersion::set(opCtx, version);
return true;
}
diff --git a/src/mongo/db/s/config/configsvr_split_chunk_command.cpp b/src/mongo/db/s/config/configsvr_split_chunk_command.cpp
index a8744987929..0b3cfe6f40d 100644
--- a/src/mongo/db/s/config/configsvr_split_chunk_command.cpp
+++ b/src/mongo/db/s/config/configsvr_split_chunk_command.cpp
@@ -96,7 +96,7 @@ public:
return parseNsFullyQualified(dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbName,
BSONObj& cmdObj,
int options,
@@ -110,12 +110,12 @@ public:
auto parsedRequest = uassertStatusOK(SplitChunkRequest::parseFromConfigCommand(cmdObj));
Status splitChunkResult =
- Grid::get(txn)->catalogManager()->commitChunkSplit(txn,
- parsedRequest.getNamespace(),
- parsedRequest.getEpoch(),
- parsedRequest.getChunkRange(),
- parsedRequest.getSplitPoints(),
- parsedRequest.getShardName());
+ Grid::get(opCtx)->catalogManager()->commitChunkSplit(opCtx,
+ parsedRequest.getNamespace(),
+ parsedRequest.getEpoch(),
+ parsedRequest.getChunkRange(),
+ parsedRequest.getSplitPoints(),
+ parsedRequest.getShardName());
if (!splitChunkResult.isOK()) {
return appendCommandStatus(result, splitChunkResult);
}
diff --git a/src/mongo/db/s/config/configsvr_update_zone_key_range_command.cpp b/src/mongo/db/s/config/configsvr_update_zone_key_range_command.cpp
index 09ff5f8bf74..36d68576568 100644
--- a/src/mongo/db/s/config/configsvr_update_zone_key_range_command.cpp
+++ b/src/mongo/db/s/config/configsvr_update_zone_key_range_command.cpp
@@ -89,7 +89,7 @@ public:
return Status::OK();
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& unusedDbName,
BSONObj& cmdObj,
int options,
@@ -109,11 +109,11 @@ public:
}
if (parsedRequest.isRemove()) {
- uassertStatusOK(Grid::get(txn)->catalogManager()->removeKeyRangeFromZone(
- txn, parsedRequest.getNS(), parsedRequest.getRange()));
+ uassertStatusOK(Grid::get(opCtx)->catalogManager()->removeKeyRangeFromZone(
+ opCtx, parsedRequest.getNS(), parsedRequest.getRange()));
} else {
- uassertStatusOK(Grid::get(txn)->catalogManager()->assignKeyRangeToZone(
- txn, parsedRequest.getNS(), parsedRequest.getRange(), zoneName));
+ uassertStatusOK(Grid::get(opCtx)->catalogManager()->assignKeyRangeToZone(
+ opCtx, parsedRequest.getNS(), parsedRequest.getRange(), zoneName));
}
return true;
diff --git a/src/mongo/db/s/get_shard_version_command.cpp b/src/mongo/db/s/get_shard_version_command.cpp
index a14732867d7..86796a4ef50 100644
--- a/src/mongo/db/s/get_shard_version_command.cpp
+++ b/src/mongo/db/s/get_shard_version_command.cpp
@@ -82,7 +82,7 @@ public:
return parseNsFullyQualified(dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -93,14 +93,14 @@ public:
str::stream() << nss.ns() << " is not a valid namespace",
nss.isValid());
- ShardingState* const gss = ShardingState::get(txn);
+ ShardingState* const gss = ShardingState::get(opCtx);
if (gss->enabled()) {
- result.append("configServer", gss->getConfigServer(txn).toString());
+ result.append("configServer", gss->getConfigServer(opCtx).toString());
} else {
result.append("configServer", "");
}
- ShardedConnectionInfo* const sci = ShardedConnectionInfo::get(txn->getClient(), false);
+ ShardedConnectionInfo* const sci = ShardedConnectionInfo::get(opCtx->getClient(), false);
result.appendBool("inShardedMode", sci != nullptr);
if (sci) {
result.appendTimestamp("mine", sci->getVersion(nss.ns()).toLong());
@@ -108,8 +108,8 @@ public:
result.appendTimestamp("mine", 0);
}
- AutoGetCollection autoColl(txn, nss, MODE_IS);
- CollectionShardingState* const css = CollectionShardingState::get(txn, nss);
+ AutoGetCollection autoColl(opCtx, nss, MODE_IS);
+ CollectionShardingState* const css = CollectionShardingState::get(opCtx, nss);
ScopedCollectionMetadata metadata;
if (css) {
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index 242c9d5fc7f..f03e7948374 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -57,16 +57,16 @@ using std::vector;
namespace {
-bool _checkMetadataForSuccess(OperationContext* txn,
+bool _checkMetadataForSuccess(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& minKey,
const BSONObj& maxKey) {
ScopedCollectionMetadata metadataAfterMerge;
{
- AutoGetCollection autoColl(txn, nss, MODE_IS);
+ AutoGetCollection autoColl(opCtx, nss, MODE_IS);
// Get collection metadata
- metadataAfterMerge = CollectionShardingState::get(txn, nss.ns())->getMetadata();
+ metadataAfterMerge = CollectionShardingState::get(opCtx, nss.ns())->getMetadata();
}
ChunkType chunk;
@@ -77,7 +77,7 @@ bool _checkMetadataForSuccess(OperationContext* txn,
return chunk.getMin().woCompare(minKey) == 0 && chunk.getMax().woCompare(maxKey) == 0;
}
-Status mergeChunks(OperationContext* txn,
+Status mergeChunks(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& minKey,
const BSONObj& maxKey,
@@ -86,8 +86,8 @@ Status mergeChunks(OperationContext* txn,
// TODO(SERVER-25086): Remove distLock acquisition from merge chunk
const string whyMessage = stream() << "merging chunks in " << nss.ns() << " from " << minKey
<< " to " << maxKey;
- auto scopedDistLock = grid.catalogClient(txn)->getDistLockManager()->lock(
- txn, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout);
+ auto scopedDistLock = grid.catalogClient(opCtx)->getDistLockManager()->lock(
+ opCtx, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout);
if (!scopedDistLock.isOK()) {
std::string errmsg = stream() << "could not acquire collection lock for " << nss.ns()
@@ -99,14 +99,14 @@ Status mergeChunks(OperationContext* txn,
return Status(scopedDistLock.getStatus().code(), errmsg);
}
- ShardingState* shardingState = ShardingState::get(txn);
+ ShardingState* shardingState = ShardingState::get(opCtx);
//
// We now have the collection lock, refresh metadata to latest version and sanity check
//
ChunkVersion shardVersion;
- Status refreshStatus = shardingState->refreshMetadataNow(txn, nss, &shardVersion);
+ Status refreshStatus = shardingState->refreshMetadataNow(opCtx, nss, &shardVersion);
if (!refreshStatus.isOK()) {
std::string errmsg = str::stream()
@@ -130,9 +130,9 @@ Status mergeChunks(OperationContext* txn,
ScopedCollectionMetadata metadata;
{
- AutoGetCollection autoColl(txn, nss, MODE_IS);
+ AutoGetCollection autoColl(opCtx, nss, MODE_IS);
- metadata = CollectionShardingState::get(txn, nss.ns())->getMetadata();
+ metadata = CollectionShardingState::get(opCtx, nss.ns())->getMetadata();
if (!metadata || metadata->getKeyPattern().isEmpty()) {
std::string errmsg = stream() << "could not merge chunks, collection " << nss.ns()
<< " is not sharded";
@@ -262,8 +262,8 @@ Status mergeChunks(OperationContext* txn,
auto configCmdObj =
request.toConfigCommandBSON(ShardingCatalogClient::kMajorityWriteConcern.toBSON());
- auto cmdResponseStatus = Grid::get(txn)->shardRegistry()->getConfigShard()->runCommand(
- txn,
+ auto cmdResponseStatus = Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommand(
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"admin",
configCmdObj,
@@ -275,7 +275,7 @@ Status mergeChunks(OperationContext* txn,
//
{
ChunkVersion shardVersionAfterMerge;
- refreshStatus = shardingState->refreshMetadataNow(txn, nss, &shardVersionAfterMerge);
+ refreshStatus = shardingState->refreshMetadataNow(opCtx, nss, &shardVersionAfterMerge);
if (!refreshStatus.isOK()) {
std::string errmsg = str::stream() << "failed to refresh metadata for merge chunk ["
@@ -301,7 +301,7 @@ Status mergeChunks(OperationContext* txn,
auto writeConcernStatus = std::move(cmdResponseStatus.getValue().writeConcernStatus);
if ((!commandStatus.isOK() || !writeConcernStatus.isOK()) &&
- _checkMetadataForSuccess(txn, nss, minKey, maxKey)) {
+ _checkMetadataForSuccess(opCtx, nss, minKey, maxKey)) {
LOG(1) << "mergeChunk [" << redact(minKey) << "," << redact(maxKey)
<< ") has already been committed.";
@@ -360,13 +360,13 @@ public:
// Optional, if the merge is only valid for a particular epoch
static BSONField<OID> epochField;
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
string& errmsg,
BSONObjBuilder& result) override {
- uassertStatusOK(ShardingState::get(txn)->canAcceptShardedCommands());
+ uassertStatusOK(ShardingState::get(opCtx)->canAcceptShardedCommands());
string ns = parseNs(dbname, cmdObj);
@@ -409,7 +409,7 @@ public:
return false;
}
- auto mergeStatus = mergeChunks(txn, NamespaceString(ns), minKey, maxKey, epoch);
+ auto mergeStatus = mergeChunks(opCtx, NamespaceString(ns), minKey, maxKey, epoch);
return appendCommandStatus(result, mergeStatus);
}
} mergeChunksCmd;
diff --git a/src/mongo/db/s/metadata_loader.cpp b/src/mongo/db/s/metadata_loader.cpp
index f337e56224b..8385ea5c3d6 100644
--- a/src/mongo/db/s/metadata_loader.cpp
+++ b/src/mongo/db/s/metadata_loader.cpp
@@ -77,12 +77,12 @@ public:
return chunk.getShard() == _currShard;
}
- virtual pair<BSONObj, CachedChunkInfo> rangeFor(OperationContext* txn,
+ virtual pair<BSONObj, CachedChunkInfo> rangeFor(OperationContext* opCtx,
const ChunkType& chunk) const {
return make_pair(chunk.getMin(), CachedChunkInfo(chunk.getMax(), chunk.getVersion()));
}
- virtual ShardId shardFor(OperationContext* txn, const ShardId& name) const {
+ virtual ShardId shardFor(OperationContext* opCtx, const ShardId& name) const {
return name;
}
@@ -96,27 +96,27 @@ private:
} // namespace
-Status MetadataLoader::makeCollectionMetadata(OperationContext* txn,
+Status MetadataLoader::makeCollectionMetadata(OperationContext* opCtx,
ShardingCatalogClient* catalogClient,
const string& ns,
const string& shard,
const CollectionMetadata* oldMetadata,
CollectionMetadata* metadata) {
- Status initCollectionStatus = _initCollection(txn, catalogClient, ns, shard, metadata);
+ Status initCollectionStatus = _initCollection(opCtx, catalogClient, ns, shard, metadata);
if (!initCollectionStatus.isOK()) {
return initCollectionStatus;
}
- return _initChunks(txn, catalogClient, ns, shard, oldMetadata, metadata);
+ return _initChunks(opCtx, catalogClient, ns, shard, oldMetadata, metadata);
}
-Status MetadataLoader::_initCollection(OperationContext* txn,
+Status MetadataLoader::_initCollection(OperationContext* opCtx,
ShardingCatalogClient* catalogClient,
const string& ns,
const string& shard,
CollectionMetadata* metadata) {
// Get the config.collections entry for 'ns'.
- auto coll = catalogClient->getCollection(txn, ns);
+ auto coll = catalogClient->getCollection(opCtx, ns);
if (!coll.isOK()) {
return coll.getStatus();
}
@@ -138,7 +138,7 @@ Status MetadataLoader::_initCollection(OperationContext* txn,
return Status::OK();
}
-Status MetadataLoader::_initChunks(OperationContext* txn,
+Status MetadataLoader::_initChunks(OperationContext* opCtx,
ShardingCatalogClient* catalogClient,
const string& ns,
const string& shard,
@@ -186,7 +186,7 @@ Status MetadataLoader::_initChunks(OperationContext* txn,
const auto diffQuery = SCMConfigDiffTracker::createConfigDiffQuery(NamespaceString(ns),
metadata->_collVersion);
std::vector<ChunkType> chunks;
- Status status = catalogClient->getChunks(txn,
+ Status status = catalogClient->getChunks(opCtx,
diffQuery.query,
diffQuery.sort,
boost::none,
@@ -200,7 +200,7 @@ Status MetadataLoader::_initChunks(OperationContext* txn,
// If we are the primary, or a standalone, persist new chunks locally.
status = _writeNewChunksIfPrimary(
- txn, NamespaceString(ns), chunks, metadata->_collVersion.epoch());
+ opCtx, NamespaceString(ns), chunks, metadata->_collVersion.epoch());
if (!status.isOK()) {
return status;
}
@@ -210,7 +210,7 @@ Status MetadataLoader::_initChunks(OperationContext* txn,
// last time). If not, something has changed on the config server (potentially between
// when we read the collection data and when we read the chunks data).
//
- int diffsApplied = differ.calculateConfigDiff(txn, chunks);
+ int diffsApplied = differ.calculateConfigDiff(opCtx, chunks);
if (diffsApplied > 0) {
// Chunks found, return ok
LOG(2) << "loaded " << diffsApplied << " chunks into new metadata for " << ns
@@ -253,7 +253,7 @@ Status MetadataLoader::_initChunks(OperationContext* txn,
}
}
-Status MetadataLoader::_writeNewChunksIfPrimary(OperationContext* txn,
+Status MetadataLoader::_writeNewChunksIfPrimary(OperationContext* opCtx,
const NamespaceString& nss,
const std::vector<ChunkType>& chunks,
const OID& currEpoch) {
@@ -261,13 +261,13 @@ Status MetadataLoader::_writeNewChunksIfPrimary(OperationContext* txn,
// Only do the write(s) if this is a primary or standalone. Otherwise, return OK.
if (serverGlobalParams.clusterRole != ClusterRole::ShardServer ||
- !repl::ReplicationCoordinator::get(txn)->canAcceptWritesForDatabase_UNSAFE(
- txn, chunkMetadataNss.ns())) {
+ !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase_UNSAFE(
+ opCtx, chunkMetadataNss.ns())) {
return Status::OK();
}
try {
- DBDirectClient client(txn);
+ DBDirectClient client(opCtx);
/**
* Here are examples of the operations that can happen on the config server to update
diff --git a/src/mongo/db/s/metadata_loader.h b/src/mongo/db/s/metadata_loader.h
index 8c12233da2e..cfeb6348cfb 100644
--- a/src/mongo/db/s/metadata_loader.h
+++ b/src/mongo/db/s/metadata_loader.h
@@ -56,7 +56,7 @@ class OperationContext;
* Example usage:
* beforeMetadata = <get latest local metadata>;
* remoteMetadata = makeCollectionMetadata( beforeMetadata, remoteMetadata );
- * DBLock lock(txn, dbname, MODE_X);
+ * DBLock lock(opCtx, dbname, MODE_X);
* afterMetadata = <get latest local metadata>;
*
* The loader will go out of its way to try to fetch the smaller amount possible of data
@@ -85,7 +85,7 @@ public:
* @return HostUnreachable if there was an error contacting the config servers
* @return RemoteChangeDetected if the data loaded was modified by another operation
*/
- static Status makeCollectionMetadata(OperationContext* txn,
+ static Status makeCollectionMetadata(OperationContext* opCtx,
ShardingCatalogClient* catalogClient,
const std::string& ns,
const std::string& shard,
@@ -104,7 +104,7 @@ private:
* @return RemoteChangeDetected if the collection doc loaded is unexpectedly different
*
*/
- static Status _initCollection(OperationContext* txn,
+ static Status _initCollection(OperationContext* opCtx,
ShardingCatalogClient* catalogClient,
const std::string& ns,
const std::string& shard,
@@ -123,7 +123,7 @@ private:
* @return NamespaceNotFound if there are no chunks loaded and an epoch change is detected
* TODO: @return FailedToParse
*/
- static Status _initChunks(OperationContext* txn,
+ static Status _initChunks(OperationContext* opCtx,
ShardingCatalogClient* catalogClient,
const std::string& ns,
const std::string& shard,
@@ -148,7 +148,7 @@ private:
* 'currEpoch'
* - Other errors in writes/reads to the config.chunks.ns collection fails.
*/
- static Status _writeNewChunksIfPrimary(OperationContext* txn,
+ static Status _writeNewChunksIfPrimary(OperationContext* opCtx,
const NamespaceString& nss,
const std::vector<ChunkType>& chunks,
const OID& currEpoch);
diff --git a/src/mongo/db/s/metadata_loader_test.cpp b/src/mongo/db/s/metadata_loader_test.cpp
index 964cffe9071..b9d10773563 100644
--- a/src/mongo/db/s/metadata_loader_test.cpp
+++ b/src/mongo/db/s/metadata_loader_test.cpp
@@ -238,10 +238,10 @@ TEST_F(MetadataLoaderTest, NoChunksIsDropped) {
auto future = launchAsync([this] {
ON_BLOCK_EXIT([&] { Client::destroy(); });
Client::initThreadIfNotAlready("Test");
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
CollectionMetadata metadata;
- auto status = MetadataLoader::makeCollectionMetadata(txn.get(),
+ auto status = MetadataLoader::makeCollectionMetadata(opCtx.get(),
catalogClient(),
kNss.ns(),
kShardId.toString(),
@@ -272,10 +272,10 @@ TEST_F(MetadataLoaderTest, CheckNumChunk) {
auto future = launchAsync([this] {
ON_BLOCK_EXIT([&] { Client::destroy(); });
Client::initThreadIfNotAlready("Test");
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
CollectionMetadata metadata;
- auto status = MetadataLoader::makeCollectionMetadata(txn.get(),
+ auto status = MetadataLoader::makeCollectionMetadata(opCtx.get(),
catalogClient(),
kNss.ns(),
kShardId.toString(),
@@ -299,10 +299,10 @@ TEST_F(MetadataLoaderTest, SingleChunkCheckNumChunk) {
auto future = launchAsync([this] {
ON_BLOCK_EXIT([&] { Client::destroy(); });
Client::initThreadIfNotAlready("Test");
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
CollectionMetadata metadata;
- auto status = MetadataLoader::makeCollectionMetadata(txn.get(),
+ auto status = MetadataLoader::makeCollectionMetadata(opCtx.get(),
catalogClient(),
kNss.ns(),
kShardId.toString(),
@@ -326,10 +326,10 @@ TEST_F(MetadataLoaderTest, SeveralChunksCheckNumChunks) {
auto future = launchAsync([this] {
ON_BLOCK_EXIT([&] { Client::destroy(); });
Client::initThreadIfNotAlready("Test");
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
CollectionMetadata metadata;
- auto status = MetadataLoader::makeCollectionMetadata(txn.get(),
+ auto status = MetadataLoader::makeCollectionMetadata(opCtx.get(),
catalogClient(),
kNss.ns(),
kShardId.toString(),
@@ -353,10 +353,10 @@ TEST_F(MetadataLoaderTest, CollectionMetadataSetUp) {
auto future = launchAsync([this] {
ON_BLOCK_EXIT([&] { Client::destroy(); });
Client::initThreadIfNotAlready("Test");
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
CollectionMetadata metadata;
- auto status = MetadataLoader::makeCollectionMetadata(txn.get(),
+ auto status = MetadataLoader::makeCollectionMetadata(opCtx.get(),
catalogClient(),
kNss.ns(),
kShardId.toString(),
diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp
index 08f26ac3298..4f55d9f6f05 100644
--- a/src/mongo/db/s/metadata_manager_test.cpp
+++ b/src/mongo/db/s/metadata_manager_test.cpp
@@ -251,13 +251,13 @@ TEST_F(MetadataManagerTest, NotificationBlocksUntilDeletion) {
ChunkRange cr1(BSON("key" << 0), BSON("key" << 10));
auto notification = manager.addRangeToClean(cr1);
- auto txn = cc().makeOperationContext().get();
+ auto opCtx = cc().makeOperationContext().get();
// Once the new range deleter is set up, this might fail if the range deleter
// deleted cr1 before we got here...
- ASSERT_FALSE(notification->waitFor(txn, Milliseconds(0)));
+ ASSERT_FALSE(notification->waitFor(opCtx, Milliseconds(0)));
manager.removeRangeToClean(cr1);
- ASSERT_TRUE(notification->waitFor(txn, Milliseconds(0)));
+ ASSERT_TRUE(notification->waitFor(opCtx, Milliseconds(0)));
ASSERT_OK(notification->get());
}
diff --git a/src/mongo/db/s/migration_chunk_cloner_source.h b/src/mongo/db/s/migration_chunk_cloner_source.h
index 04cf9e36df2..50a31da4db6 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source.h
+++ b/src/mongo/db/s/migration_chunk_cloner_source.h
@@ -65,7 +65,7 @@ public:
* NOTE: Must be called without any locks and must succeed, before any other methods are called
* (except for cancelClone and [insert/update/delete]Op).
*/
- virtual Status startClone(OperationContext* txn) = 0;
+ virtual Status startClone(OperationContext* opCtx) = 0;
/**
* Blocking method, which uses some custom selected logic for deciding whether it is appropriate
@@ -77,7 +77,7 @@ public:
*
* NOTE: Must be called without any locks.
*/
- virtual Status awaitUntilCriticalSectionIsAppropriate(OperationContext* txn,
+ virtual Status awaitUntilCriticalSectionIsAppropriate(OperationContext* opCtx,
Milliseconds maxTimeToWait) = 0;
/**
@@ -90,7 +90,7 @@ public:
*
* NOTE: Must be called without any locks.
*/
- virtual Status commitClone(OperationContext* txn) = 0;
+ virtual Status commitClone(OperationContext* opCtx) = 0;
/**
* Tells the recipient to abort the clone and cleanup any unused data. This method's
@@ -98,7 +98,7 @@ public:
*
* NOTE: Must be called without any locks.
*/
- virtual void cancelClone(OperationContext* txn) = 0;
+ virtual void cancelClone(OperationContext* opCtx) = 0;
// These methods are only meaningful for the legacy cloner and they are used as a way to keep a
// running list of changes, which need to be fetched.
@@ -109,7 +109,7 @@ public:
*
* NOTE: Must be called with at least IS lock held on the collection.
*/
- virtual bool isDocumentInMigratingChunk(OperationContext* txn, const BSONObj& doc) = 0;
+ virtual bool isDocumentInMigratingChunk(OperationContext* opCtx, const BSONObj& doc) = 0;
/**
* Notifies this cloner that an insert happened to the collection, which it owns. It is up to
@@ -118,7 +118,7 @@ public:
*
* NOTE: Must be called with at least IX lock held on the collection.
*/
- virtual void onInsertOp(OperationContext* txn, const BSONObj& insertedDoc) = 0;
+ virtual void onInsertOp(OperationContext* opCtx, const BSONObj& insertedDoc) = 0;
/**
* Notifies this cloner that an update happened to the collection, which it owns. It is up to
@@ -127,7 +127,7 @@ public:
*
* NOTE: Must be called with at least IX lock held on the collection.
*/
- virtual void onUpdateOp(OperationContext* txn, const BSONObj& updatedDoc) = 0;
+ virtual void onUpdateOp(OperationContext* opCtx, const BSONObj& updatedDoc) = 0;
/**
* Notifies this cloner that a delede happened to the collection, which it owns. It is up to the
@@ -136,7 +136,7 @@ public:
*
* NOTE: Must be called with at least IX lock held on the collection.
*/
- virtual void onDeleteOp(OperationContext* txn, const BSONObj& deletedDocId) = 0;
+ virtual void onDeleteOp(OperationContext* opCtx, const BSONObj& deletedDocId) = 0;
protected:
MigrationChunkClonerSource();
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index ac6b513a049..9354f60b8e1 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -90,10 +90,10 @@ BSONObj createRequestWithSessionId(StringData commandName,
*/
class DeleteNotificationStage final : public PlanStage {
public:
- DeleteNotificationStage(MigrationChunkClonerSourceLegacy* cloner, OperationContext* txn)
- : PlanStage("SHARDING_NOTIFY_DELETE", txn), _cloner(cloner) {}
+ DeleteNotificationStage(MigrationChunkClonerSourceLegacy* cloner, OperationContext* opCtx)
+ : PlanStage("SHARDING_NOTIFY_DELETE", opCtx), _cloner(cloner) {}
- void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) override {
+ void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) override {
if (type == INVALIDATION_DELETION) {
stdx::lock_guard<stdx::mutex> sl(_cloner->_mutex);
_cloner->_cloneLocs.erase(dl);
@@ -182,12 +182,12 @@ MigrationChunkClonerSourceLegacy::~MigrationChunkClonerSourceLegacy() {
invariant(!_deleteNotifyExec);
}
-Status MigrationChunkClonerSourceLegacy::startClone(OperationContext* txn) {
+Status MigrationChunkClonerSourceLegacy::startClone(OperationContext* opCtx) {
invariant(_state == kNew);
- invariant(!txn->lockState()->isLocked());
+ invariant(!opCtx->lockState()->isLocked());
// Load the ids of the currently available documents
- auto storeCurrentLocsStatus = _storeCurrentLocs(txn);
+ auto storeCurrentLocsStatus = _storeCurrentLocs(opCtx);
if (!storeCurrentLocsStatus.isOK()) {
return storeCurrentLocsStatus;
}
@@ -223,9 +223,9 @@ Status MigrationChunkClonerSourceLegacy::startClone(OperationContext* txn) {
}
Status MigrationChunkClonerSourceLegacy::awaitUntilCriticalSectionIsAppropriate(
- OperationContext* txn, Milliseconds maxTimeToWait) {
+ OperationContext* opCtx, Milliseconds maxTimeToWait) {
invariant(_state == kCloning);
- invariant(!txn->lockState()->isLocked());
+ invariant(!opCtx->lockState()->isLocked());
const auto startTime = Date_t::now();
@@ -297,7 +297,7 @@ Status MigrationChunkClonerSourceLegacy::awaitUntilCriticalSectionIsAppropriate(
"Aborting migration because of high memory usage"};
}
- Status interruptStatus = txn->checkForInterruptNoAssert();
+ Status interruptStatus = opCtx->checkForInterruptNoAssert();
if (!interruptStatus.isOK()) {
return interruptStatus;
}
@@ -306,23 +306,23 @@ Status MigrationChunkClonerSourceLegacy::awaitUntilCriticalSectionIsAppropriate(
return {ErrorCodes::ExceededTimeLimit, "Timed out waiting for the cloner to catch up"};
}
-Status MigrationChunkClonerSourceLegacy::commitClone(OperationContext* txn) {
+Status MigrationChunkClonerSourceLegacy::commitClone(OperationContext* opCtx) {
invariant(_state == kCloning);
- invariant(!txn->lockState()->isLocked());
+ invariant(!opCtx->lockState()->isLocked());
auto responseStatus =
_callRecipient(createRequestWithSessionId(kRecvChunkCommit, _args.getNss(), _sessionId));
if (responseStatus.isOK()) {
- _cleanup(txn);
+ _cleanup(opCtx);
return Status::OK();
}
- cancelClone(txn);
+ cancelClone(opCtx);
return responseStatus.getStatus();
}
-void MigrationChunkClonerSourceLegacy::cancelClone(OperationContext* txn) {
- invariant(!txn->lockState()->isLocked());
+void MigrationChunkClonerSourceLegacy::cancelClone(OperationContext* opCtx) {
+ invariant(!opCtx->lockState()->isLocked());
switch (_state) {
case kDone:
@@ -331,21 +331,21 @@ void MigrationChunkClonerSourceLegacy::cancelClone(OperationContext* txn) {
_callRecipient(createRequestWithSessionId(kRecvChunkAbort, _args.getNss(), _sessionId));
// Intentional fall through
case kNew:
- _cleanup(txn);
+ _cleanup(opCtx);
break;
default:
MONGO_UNREACHABLE;
}
}
-bool MigrationChunkClonerSourceLegacy::isDocumentInMigratingChunk(OperationContext* txn,
+bool MigrationChunkClonerSourceLegacy::isDocumentInMigratingChunk(OperationContext* opCtx,
const BSONObj& doc) {
return isInRange(doc, _args.getMinKey(), _args.getMaxKey(), _shardKeyPattern);
}
-void MigrationChunkClonerSourceLegacy::onInsertOp(OperationContext* txn,
+void MigrationChunkClonerSourceLegacy::onInsertOp(OperationContext* opCtx,
const BSONObj& insertedDoc) {
- dassert(txn->lockState()->isCollectionLockedForMode(_args.getNss().ns(), MODE_IX));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(_args.getNss().ns(), MODE_IX));
BSONElement idElement = insertedDoc["_id"];
if (idElement.eoo()) {
@@ -358,12 +358,12 @@ void MigrationChunkClonerSourceLegacy::onInsertOp(OperationContext* txn,
return;
}
- txn->recoveryUnit()->registerChange(new LogOpForShardingHandler(this, idElement.wrap(), 'i'));
+ opCtx->recoveryUnit()->registerChange(new LogOpForShardingHandler(this, idElement.wrap(), 'i'));
}
-void MigrationChunkClonerSourceLegacy::onUpdateOp(OperationContext* txn,
+void MigrationChunkClonerSourceLegacy::onUpdateOp(OperationContext* opCtx,
const BSONObj& updatedDoc) {
- dassert(txn->lockState()->isCollectionLockedForMode(_args.getNss().ns(), MODE_IX));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(_args.getNss().ns(), MODE_IX));
BSONElement idElement = updatedDoc["_id"];
if (idElement.eoo()) {
@@ -376,12 +376,12 @@ void MigrationChunkClonerSourceLegacy::onUpdateOp(OperationContext* txn,
return;
}
- txn->recoveryUnit()->registerChange(new LogOpForShardingHandler(this, idElement.wrap(), 'u'));
+ opCtx->recoveryUnit()->registerChange(new LogOpForShardingHandler(this, idElement.wrap(), 'u'));
}
-void MigrationChunkClonerSourceLegacy::onDeleteOp(OperationContext* txn,
+void MigrationChunkClonerSourceLegacy::onDeleteOp(OperationContext* opCtx,
const BSONObj& deletedDocId) {
- dassert(txn->lockState()->isCollectionLockedForMode(_args.getNss().ns(), MODE_IX));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(_args.getNss().ns(), MODE_IX));
BSONElement idElement = deletedDocId["_id"];
if (idElement.eoo()) {
@@ -390,7 +390,7 @@ void MigrationChunkClonerSourceLegacy::onDeleteOp(OperationContext* txn,
return;
}
- txn->recoveryUnit()->registerChange(new LogOpForShardingHandler(this, idElement.wrap(), 'd'));
+ opCtx->recoveryUnit()->registerChange(new LogOpForShardingHandler(this, idElement.wrap(), 'd'));
}
uint64_t MigrationChunkClonerSourceLegacy::getCloneBatchBufferAllocationSize() {
@@ -400,12 +400,12 @@ uint64_t MigrationChunkClonerSourceLegacy::getCloneBatchBufferAllocationSize() {
_averageObjectSizeForCloneLocs * _cloneLocs.size());
}
-Status MigrationChunkClonerSourceLegacy::nextCloneBatch(OperationContext* txn,
+Status MigrationChunkClonerSourceLegacy::nextCloneBatch(OperationContext* opCtx,
Collection* collection,
BSONArrayBuilder* arrBuilder) {
- dassert(txn->lockState()->isCollectionLockedForMode(_args.getNss().ns(), MODE_IS));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(_args.getNss().ns(), MODE_IS));
- ElapsedTracker tracker(txn->getServiceContext()->getFastClockSource(),
+ ElapsedTracker tracker(opCtx->getServiceContext()->getFastClockSource(),
internalQueryExecYieldIterations.load(),
Milliseconds(internalQueryExecYieldPeriodMS.load()));
@@ -421,7 +421,7 @@ Status MigrationChunkClonerSourceLegacy::nextCloneBatch(OperationContext* txn,
}
Snapshotted<BSONObj> doc;
- if (collection->findDoc(txn, *it, &doc)) {
+ if (collection->findDoc(opCtx, *it, &doc)) {
// Use the builder size instead of accumulating the document sizes directly so that we
// take into consideration the overhead of BSONArray indices.
if (arrBuilder->arrSize() &&
@@ -444,10 +444,10 @@ Status MigrationChunkClonerSourceLegacy::nextCloneBatch(OperationContext* txn,
return Status::OK();
}
-Status MigrationChunkClonerSourceLegacy::nextModsBatch(OperationContext* txn,
+Status MigrationChunkClonerSourceLegacy::nextModsBatch(OperationContext* opCtx,
Database* db,
BSONObjBuilder* builder) {
- dassert(txn->lockState()->isCollectionLockedForMode(_args.getNss().ns(), MODE_IS));
+ dassert(opCtx->lockState()->isCollectionLockedForMode(_args.getNss().ns(), MODE_IS));
stdx::lock_guard<stdx::mutex> sl(_mutex);
@@ -456,15 +456,15 @@ Status MigrationChunkClonerSourceLegacy::nextModsBatch(OperationContext* txn,
long long docSizeAccumulator = 0;
- _xfer(txn, db, &_deleted, builder, "deleted", &docSizeAccumulator, false);
- _xfer(txn, db, &_reload, builder, "reload", &docSizeAccumulator, true);
+ _xfer(opCtx, db, &_deleted, builder, "deleted", &docSizeAccumulator, false);
+ _xfer(opCtx, db, &_reload, builder, "reload", &docSizeAccumulator, true);
builder->append("size", docSizeAccumulator);
return Status::OK();
}
-void MigrationChunkClonerSourceLegacy::_cleanup(OperationContext* txn) {
+void MigrationChunkClonerSourceLegacy::_cleanup(OperationContext* opCtx) {
{
stdx::lock_guard<stdx::mutex> sl(_mutex);
_state = kDone;
@@ -473,8 +473,8 @@ void MigrationChunkClonerSourceLegacy::_cleanup(OperationContext* txn) {
}
if (_deleteNotifyExec) {
- ScopedTransaction scopedXact(txn, MODE_IS);
- AutoGetCollection autoColl(txn, _args.getNss(), MODE_IS);
+ ScopedTransaction scopedXact(opCtx, MODE_IS);
+ AutoGetCollection autoColl(opCtx, _args.getNss(), MODE_IS);
_deleteNotifyExec.reset();
}
@@ -510,9 +510,9 @@ StatusWith<BSONObj> MigrationChunkClonerSourceLegacy::_callRecipient(const BSONO
return responseStatus.data.getOwned();
}
-Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* txn) {
- ScopedTransaction scopedXact(txn, MODE_IS);
- AutoGetCollection autoColl(txn, _args.getNss(), MODE_IS);
+Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opCtx) {
+ ScopedTransaction scopedXact(opCtx, MODE_IS);
+ AutoGetCollection autoColl(opCtx, _args.getNss(), MODE_IS);
Collection* const collection = autoColl.getCollection();
if (!collection) {
@@ -523,7 +523,7 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* txn
// Allow multiKey based on the invariant that shard keys must be single-valued. Therefore, any
// multi-key index prefixed by shard key cannot be multikey over the shard key fields.
IndexDescriptor* const idx =
- collection->getIndexCatalog()->findShardKeyPrefixedIndex(txn,
+ collection->getIndexCatalog()->findShardKeyPrefixedIndex(opCtx,
_shardKeyPattern.toBSON(),
false); // requireSingleKey
if (!idx) {
@@ -535,9 +535,9 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* txn
// Install the stage, which will listen for notifications on the collection
auto statusWithDeleteNotificationPlanExecutor =
- PlanExecutor::make(txn,
+ PlanExecutor::make(opCtx,
stdx::make_unique<WorkingSet>(),
- stdx::make_unique<DeleteNotificationStage>(this, txn),
+ stdx::make_unique<DeleteNotificationStage>(this, opCtx),
collection,
PlanExecutor::YIELD_MANUAL);
if (!statusWithDeleteNotificationPlanExecutor.isOK()) {
@@ -554,7 +554,7 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* txn
BSONObj max = Helpers::toKeyFormat(kp.extendRangeBound(_args.getMaxKey(), false));
std::unique_ptr<PlanExecutor> exec(
- InternalPlanner::indexScan(txn,
+ InternalPlanner::indexScan(opCtx,
collection,
idx,
min,
@@ -572,9 +572,9 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* txn
unsigned long long maxRecsWhenFull;
long long avgRecSize;
- const long long totalRecs = collection->numRecords(txn);
+ const long long totalRecs = collection->numRecords(opCtx);
if (totalRecs > 0) {
- avgRecSize = collection->dataSize(txn) / totalRecs;
+ avgRecSize = collection->dataSize(opCtx) / totalRecs;
maxRecsWhenFull = _args.getMaxChunkSizeBytes() / avgRecSize;
maxRecsWhenFull = std::min((unsigned long long)(kMaxObjectPerChunk + 1),
130 * maxRecsWhenFull / 100 /* slack */);
@@ -610,7 +610,7 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* txn
<< WorkingSetCommon::toStatusString(obj)};
}
- const uint64_t collectionAverageObjectSize = collection->averageObjectSize(txn);
+ const uint64_t collectionAverageObjectSize = collection->averageObjectSize(opCtx);
if (isLargeChunk) {
return {
@@ -638,7 +638,7 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* txn
return Status::OK();
}
-void MigrationChunkClonerSourceLegacy::_xfer(OperationContext* txn,
+void MigrationChunkClonerSourceLegacy::_xfer(OperationContext* opCtx,
Database* db,
std::list<BSONObj>* docIdList,
BSONObjBuilder* builder,
@@ -660,7 +660,7 @@ void MigrationChunkClonerSourceLegacy::_xfer(OperationContext* txn,
BSONObj idDoc = *docIdIter;
if (explode) {
BSONObj fullDoc;
- if (Helpers::findById(txn, db, ns.c_str(), idDoc, fullDoc)) {
+ if (Helpers::findById(opCtx, db, ns.c_str(), idDoc, fullDoc)) {
arr.append(fullDoc);
*sizeAccumulator += fullDoc.objsize();
}
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
index c683df2be29..7f8b7bf5468 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
@@ -61,22 +61,22 @@ public:
HostAndPort recipientHost);
~MigrationChunkClonerSourceLegacy();
- Status startClone(OperationContext* txn) override;
+ Status startClone(OperationContext* opCtx) override;
- Status awaitUntilCriticalSectionIsAppropriate(OperationContext* txn,
+ Status awaitUntilCriticalSectionIsAppropriate(OperationContext* opCtx,
Milliseconds maxTimeToWait) override;
- Status commitClone(OperationContext* txn) override;
+ Status commitClone(OperationContext* opCtx) override;
- void cancelClone(OperationContext* txn) override;
+ void cancelClone(OperationContext* opCtx) override;
- bool isDocumentInMigratingChunk(OperationContext* txn, const BSONObj& doc) override;
+ bool isDocumentInMigratingChunk(OperationContext* opCtx, const BSONObj& doc) override;
- void onInsertOp(OperationContext* txn, const BSONObj& insertedDoc) override;
+ void onInsertOp(OperationContext* opCtx, const BSONObj& insertedDoc) override;
- void onUpdateOp(OperationContext* txn, const BSONObj& updatedDoc) override;
+ void onUpdateOp(OperationContext* opCtx, const BSONObj& updatedDoc) override;
- void onDeleteOp(OperationContext* txn, const BSONObj& deletedDocId) override;
+ void onDeleteOp(OperationContext* opCtx, const BSONObj& deletedDocId) override;
// Legacy cloner specific functionality
@@ -108,7 +108,7 @@ public:
*
* NOTE: Must be called with the collection lock held in at least IS mode.
*/
- Status nextCloneBatch(OperationContext* txn,
+ Status nextCloneBatch(OperationContext* opCtx,
Collection* collection,
BSONArrayBuilder* arrBuilder);
@@ -119,7 +119,7 @@ public:
*
* NOTE: Must be called with the collection lock held in at least IS mode.
*/
- Status nextModsBatch(OperationContext* txn, Database* db, BSONObjBuilder* builder);
+ Status nextModsBatch(OperationContext* opCtx, Database* db, BSONObjBuilder* builder);
private:
friend class DeleteNotificationStage;
@@ -132,7 +132,7 @@ private:
* Idempotent method, which cleans up any previously initialized state. It is safe to be called
* at any time, but no methods should be called after it.
*/
- void _cleanup(OperationContext* txn);
+ void _cleanup(OperationContext* opCtx);
/**
* Synchronously invokes the recipient shard with the specified command and either returns the
@@ -146,7 +146,7 @@ private:
*
* Returns OK or any error status otherwise.
*/
- Status _storeCurrentLocs(OperationContext* txn);
+ Status _storeCurrentLocs(OperationContext* opCtx);
/**
* Insert items from docIdList to a new array with the given fieldName in the given builder. If
@@ -156,7 +156,7 @@ private:
*
* Should be holding the collection lock for ns if explode is true.
*/
- void _xfer(OperationContext* txn,
+ void _xfer(OperationContext* opCtx,
Database* db,
std::list<BSONObj>* docIdList,
BSONObjBuilder* builder,
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
index 2c2df8cd3f2..a51ef083521 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
@@ -57,21 +57,21 @@ class AutoGetActiveCloner {
MONGO_DISALLOW_COPYING(AutoGetActiveCloner);
public:
- AutoGetActiveCloner(OperationContext* txn, const MigrationSessionId& migrationSessionId)
- : _scopedXact(txn, MODE_IS) {
- ShardingState* const gss = ShardingState::get(txn);
+ AutoGetActiveCloner(OperationContext* opCtx, const MigrationSessionId& migrationSessionId)
+ : _scopedXact(opCtx, MODE_IS) {
+ ShardingState* const gss = ShardingState::get(opCtx);
const auto nss = gss->getActiveDonateChunkNss();
uassert(ErrorCodes::NotYetInitialized, "No active migrations were found", nss);
// Once the collection is locked, the migration status cannot change
- _autoColl.emplace(txn, *nss, MODE_IS);
+ _autoColl.emplace(opCtx, *nss, MODE_IS);
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "Collection " << nss->ns() << " does not exist",
_autoColl->getCollection());
- auto css = CollectionShardingState::get(txn, *nss);
+ auto css = CollectionShardingState::get(opCtx, *nss);
uassert(ErrorCodes::IllegalOperation,
str::stream() << "No active migrations were found for collection " << nss->ns(),
css && css->getMigrationSourceManager());
@@ -143,7 +143,7 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string&,
BSONObj& cmdObj,
int options,
@@ -159,7 +159,7 @@ public:
int arrSizeAtPrevIteration = -1;
while (!arrBuilder || arrBuilder->arrSize() > arrSizeAtPrevIteration) {
- AutoGetActiveCloner autoCloner(txn, migrationSessionId);
+ AutoGetActiveCloner autoCloner(opCtx, migrationSessionId);
if (!arrBuilder) {
arrBuilder.emplace(autoCloner.getCloner()->getCloneBatchBufferAllocationSize());
@@ -168,7 +168,7 @@ public:
arrSizeAtPrevIteration = arrBuilder->arrSize();
uassertStatusOK(autoCloner.getCloner()->nextCloneBatch(
- txn, autoCloner.getColl(), arrBuilder.get_ptr()));
+ opCtx, autoCloner.getColl(), arrBuilder.get_ptr()));
}
invariant(arrBuilder);
@@ -207,7 +207,7 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string&,
BSONObj& cmdObj,
int options,
@@ -216,9 +216,9 @@ public:
const MigrationSessionId migrationSessionId(
uassertStatusOK(MigrationSessionId::extractFromBSON(cmdObj)));
- AutoGetActiveCloner autoCloner(txn, migrationSessionId);
+ AutoGetActiveCloner autoCloner(opCtx, migrationSessionId);
- uassertStatusOK(autoCloner.getCloner()->nextModsBatch(txn, autoCloner.getDb(), &result));
+ uassertStatusOK(autoCloner.getCloner()->nextModsBatch(opCtx, autoCloner.getDb(), &result));
return true;
}
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
index 94188337f6b..193c237db5c 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
@@ -164,7 +164,7 @@ private:
StaticCatalogClient() : ShardingCatalogClientMock(nullptr) {}
StatusWith<repl::OpTimeWith<std::vector<ShardType>>> getAllShards(
- OperationContext* txn, repl::ReadConcernLevel readConcern) override {
+ OperationContext* opCtx, repl::ReadConcernLevel readConcern) override {
ShardType donorShard;
donorShard.setName(kDonorConnStr.getSetName());
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 77f72f637e8..cb46f30b056 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -114,7 +114,7 @@ bool isInRange(const BSONObj& obj,
*
* TODO: Could optimize this check out if sharding on _id.
*/
-bool willOverrideLocalId(OperationContext* txn,
+bool willOverrideLocalId(OperationContext* opCtx,
const string& ns,
BSONObj min,
BSONObj max,
@@ -123,7 +123,7 @@ bool willOverrideLocalId(OperationContext* txn,
BSONObj remoteDoc,
BSONObj* localDoc) {
*localDoc = BSONObj();
- if (Helpers::findById(txn, db, ns.c_str(), remoteDoc, *localDoc)) {
+ if (Helpers::findById(opCtx, db, ns.c_str(), remoteDoc, *localDoc)) {
return !isInRange(*localDoc, min, max, shardKeyPattern);
}
@@ -134,14 +134,14 @@ bool willOverrideLocalId(OperationContext* txn,
* Returns true if the majority of the nodes and the nodes corresponding to the given writeConcern
* (if not empty) have applied till the specified lastOp.
*/
-bool opReplicatedEnough(OperationContext* txn,
+bool opReplicatedEnough(OperationContext* opCtx,
const repl::OpTime& lastOpApplied,
const WriteConcernOptions& writeConcern) {
WriteConcernOptions majorityWriteConcern;
majorityWriteConcern.wTimeout = -1;
majorityWriteConcern.wMode = WriteConcernOptions::kMajority;
Status majorityStatus = repl::getGlobalReplicationCoordinator()
- ->awaitReplication(txn, lastOpApplied, majorityWriteConcern)
+ ->awaitReplication(opCtx, lastOpApplied, majorityWriteConcern)
.status;
if (!writeConcern.shouldWaitForOtherNodes()) {
@@ -153,7 +153,7 @@ bool opReplicatedEnough(OperationContext* txn,
WriteConcernOptions userWriteConcern(writeConcern);
userWriteConcern.wTimeout = -1;
Status userStatus = repl::getGlobalReplicationCoordinator()
- ->awaitReplication(txn, lastOpApplied, userWriteConcern)
+ ->awaitReplication(opCtx, lastOpApplied, userWriteConcern)
.status;
return majorityStatus.isOK() && userStatus.isOK();
@@ -429,7 +429,7 @@ void MigrationDestinationManager::_migrateThread(BSONObj min,
_isActiveCV.notify_all();
}
-void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
+void MigrationDestinationManager::_migrateDriver(OperationContext* opCtx,
const BSONObj& min,
const BSONObj& max,
const BSONObj& shardKeyPattern,
@@ -447,7 +447,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
<< epoch.toString() << " with session id " << *_sessionId;
MoveTimingHelper timing(
- txn, "to", _nss.ns(), min, max, 6 /* steps */, &_errmsg, ShardId(), ShardId());
+ opCtx, "to", _nss.ns(), min, max, 6 /* steps */, &_errmsg, ShardId(), ShardId());
const auto initialState = getState();
@@ -463,7 +463,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
// Just tests the connection
conn->getLastError();
- DisableDocumentValidation validationDisabler(txn);
+ DisableDocumentValidation validationDisabler(opCtx);
std::vector<BSONObj> indexSpecs;
BSONObj idIndexSpec;
@@ -483,8 +483,8 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
{
// 0. copy system.namespaces entry if collection doesn't already exist
- OldClientWriteContext ctx(txn, _nss.ns());
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, _nss)) {
+ OldClientWriteContext ctx(opCtx, _nss.ns());
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, _nss)) {
_errmsg = str::stream() << "Not primary during migration: " << _nss.ns()
<< ": checking if collection exists";
warning() << _errmsg;
@@ -508,8 +508,8 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
}
}
- WriteUnitOfWork wuow(txn);
- Status status = userCreateNS(txn, db, _nss.ns(), options, true, idIndexSpec);
+ WriteUnitOfWork wuow(opCtx);
+ Status status = userCreateNS(opCtx, db, _nss.ns(), options, true, idIndexSpec);
if (!status.isOK()) {
warning() << "failed to create collection [" << _nss << "] "
<< " with options " << options << ": " << redact(status);
@@ -521,11 +521,11 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
{
// 1. copy indexes
- ScopedTransaction scopedXact(txn, MODE_IX);
- Lock::DBLock lk(txn->lockState(), _nss.db(), MODE_X);
- OldClientContext ctx(txn, _nss.ns());
+ ScopedTransaction scopedXact(opCtx, MODE_IX);
+ Lock::DBLock lk(opCtx->lockState(), _nss.db(), MODE_X);
+ OldClientContext ctx(opCtx, _nss.ns());
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, _nss)) {
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, _nss)) {
_errmsg = str::stream() << "Not primary during migration: " << _nss.ns();
warning() << _errmsg;
setState(FAIL);
@@ -541,12 +541,12 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
return;
}
- MultiIndexBlock indexer(txn, collection);
+ MultiIndexBlock indexer(opCtx, collection);
indexer.removeExistingIndexes(&indexSpecs);
if (!indexSpecs.empty()) {
// Only copy indexes if the collection does not have any documents.
- if (collection->numRecords(txn) > 0) {
+ if (collection->numRecords(opCtx) > 0) {
_errmsg = str::stream() << "aborting migration, shard is missing "
<< indexSpecs.size() << " indexes and "
<< "collection is not empty. Non-trivial "
@@ -574,13 +574,13 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
return;
}
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
indexer.commit();
for (auto&& infoObj : indexInfoObjs.getValue()) {
// make sure to create index on secondaries as well
getGlobalServiceContext()->getOpObserver()->onCreateIndex(
- txn, db->getSystemIndexesName(), infoObj, true /* fromMigrate */);
+ opCtx, db->getSystemIndexesName(), infoObj, true /* fromMigrate */);
}
wunit.commit();
@@ -605,13 +605,13 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
deleterOptions.onlyRemoveOrphanedDocs = true;
deleterOptions.removeSaverReason = "preCleanup";
- if (!getDeleter()->deleteNow(txn, deleterOptions, &_errmsg)) {
+ if (!getDeleter()->deleteNow(opCtx, deleterOptions, &_errmsg)) {
warning() << "Failed to queue delete for migrate abort: " << redact(_errmsg);
setState(FAIL);
return;
}
- Status status = _notePending(txn, _nss, min, max, epoch);
+ Status status = _notePending(opCtx, _nss, min, max, epoch);
if (!status.isOK()) {
_errmsg = status.reason();
setState(FAIL);
@@ -646,7 +646,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
BSONObjIterator i(arr);
while (i.more()) {
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
if (getState() == ABORT) {
log() << "Migration aborted while copying documents";
@@ -655,10 +655,10 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
BSONObj docToClone = i.next().Obj();
{
- OldClientWriteContext cx(txn, _nss.ns());
+ OldClientWriteContext cx(opCtx, _nss.ns());
BSONObj localDoc;
- if (willOverrideLocalId(txn,
+ if (willOverrideLocalId(opCtx,
_nss.ns(),
min,
max,
@@ -677,7 +677,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
uasserted(16976, errMsg);
}
- Helpers::upsert(txn, _nss.ns(), docToClone, true);
+ Helpers::upsert(opCtx, _nss.ns(), docToClone, true);
}
thisTime++;
@@ -690,8 +690,8 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
if (writeConcern.shouldWaitForOtherNodes()) {
repl::ReplicationCoordinator::StatusAndDuration replStatus =
repl::getGlobalReplicationCoordinator()->awaitReplication(
- txn,
- repl::ReplClientInfo::forClient(txn->getClient()).getLastOp(),
+ opCtx,
+ repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(),
writeConcern);
if (replStatus.status.code() == ErrorCodes::WriteConcernFailed) {
warning() << "secondaryThrottle on, but doc insert timed out; "
@@ -712,7 +712,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
// If running on a replicated system, we'll need to flush the docs we cloned to the
// secondaries
- repl::OpTime lastOpApplied = repl::ReplClientInfo::forClient(txn->getClient()).getLastOp();
+ repl::OpTime lastOpApplied = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
const BSONObj xferModsRequest = createTransferModsRequest(_nss, *_sessionId);
@@ -735,20 +735,20 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
break;
}
- _applyMigrateOp(txn, _nss.ns(), min, max, shardKeyPattern, res, &lastOpApplied);
+ _applyMigrateOp(opCtx, _nss.ns(), min, max, shardKeyPattern, res, &lastOpApplied);
const int maxIterations = 3600 * 50;
int i;
for (i = 0; i < maxIterations; i++) {
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
if (getState() == ABORT) {
log() << "Migration aborted while waiting for replication at catch up stage";
return;
}
- if (opReplicatedEnough(txn, lastOpApplied, writeConcern))
+ if (opReplicatedEnough(opCtx, lastOpApplied, writeConcern))
break;
if (i > 100) {
@@ -776,7 +776,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
// until we're ready.
Timer t;
while (t.minutes() < 600) {
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
if (getState() == ABORT) {
log() << "Migration aborted while waiting for replication";
@@ -785,7 +785,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
log() << "Waiting for replication to catch up before entering critical section";
- if (_flushPendingWrites(txn, _nss.ns(), min, max, lastOpApplied, writeConcern)) {
+ if (_flushPendingWrites(opCtx, _nss.ns(), min, max, lastOpApplied, writeConcern)) {
break;
}
@@ -806,7 +806,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
bool transferAfterCommit = false;
while (getState() == STEADY || getState() == COMMIT_START) {
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
// Make sure we do at least one transfer after recv'ing the commit message. If we
// aren't sure that at least one transfer happens *after* our state changes to
@@ -826,7 +826,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
}
if (res["size"].number() > 0 &&
- _applyMigrateOp(txn, _nss.ns(), min, max, shardKeyPattern, res, &lastOpApplied)) {
+ _applyMigrateOp(opCtx, _nss.ns(), min, max, shardKeyPattern, res, &lastOpApplied)) {
continue;
}
@@ -839,7 +839,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
// 1) The from side has told us that it has locked writes (COMMIT_START)
// 2) We've checked at least one more time for un-transmitted mods
if (getState() == COMMIT_START && transferAfterCommit == true) {
- if (_flushPendingWrites(txn, _nss.ns(), min, max, lastOpApplied, writeConcern)) {
+ if (_flushPendingWrites(opCtx, _nss.ns(), min, max, lastOpApplied, writeConcern)) {
break;
}
}
@@ -867,7 +867,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
conn.done();
}
-bool MigrationDestinationManager::_applyMigrateOp(OperationContext* txn,
+bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx,
const string& ns,
const BSONObj& min,
const BSONObj& max,
@@ -882,20 +882,20 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* txn,
bool didAnything = false;
if (xfer["deleted"].isABSONObj()) {
- ScopedTransaction scopedXact(txn, MODE_IX);
- Lock::DBLock dlk(txn->lockState(), nsToDatabaseSubstring(ns), MODE_IX);
+ ScopedTransaction scopedXact(opCtx, MODE_IX);
+ Lock::DBLock dlk(opCtx->lockState(), nsToDatabaseSubstring(ns), MODE_IX);
Helpers::RemoveSaver rs("moveChunk", ns, "removedDuring");
BSONObjIterator i(xfer["deleted"].Obj()); // deleted documents
while (i.more()) {
- Lock::CollectionLock clk(txn->lockState(), ns, MODE_X);
- OldClientContext ctx(txn, ns);
+ Lock::CollectionLock clk(opCtx->lockState(), ns, MODE_X);
+ OldClientContext ctx(opCtx, ns);
BSONObj id = i.next().Obj();
// do not apply delete if doc does not belong to the chunk being migrated
BSONObj fullObj;
- if (Helpers::findById(txn, ctx.db(), ns.c_str(), id, fullObj)) {
+ if (Helpers::findById(opCtx, ctx.db(), ns.c_str(), id, fullObj)) {
if (!isInRange(fullObj, min, max, shardKeyPattern)) {
if (MONGO_FAIL_POINT(failMigrationReceivedOutOfRangeOperation)) {
invariant(0);
@@ -908,7 +908,7 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* txn,
rs.goingToDelete(fullObj);
}
- deleteObjects(txn,
+ deleteObjects(opCtx,
ctx.db() ? ctx.db()->getCollection(ns) : nullptr,
ns,
id,
@@ -917,7 +917,7 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* txn,
false /* god */,
true /* fromMigrate */);
- *lastOpApplied = repl::ReplClientInfo::forClient(txn->getClient()).getLastOp();
+ *lastOpApplied = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
didAnything = true;
}
}
@@ -925,7 +925,7 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* txn,
if (xfer["reload"].isABSONObj()) { // modified documents (insert/update)
BSONObjIterator i(xfer["reload"].Obj());
while (i.more()) {
- OldClientWriteContext cx(txn, ns);
+ OldClientWriteContext cx(opCtx, ns);
BSONObj updatedDoc = i.next().Obj();
@@ -939,7 +939,7 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* txn,
BSONObj localDoc;
if (willOverrideLocalId(
- txn, ns, min, max, shardKeyPattern, cx.db(), updatedDoc, &localDoc)) {
+ opCtx, ns, min, max, shardKeyPattern, cx.db(), updatedDoc, &localDoc)) {
string errMsg = str::stream() << "cannot migrate chunk, local document " << localDoc
<< " has same _id as reloaded remote document "
<< updatedDoc;
@@ -951,9 +951,9 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* txn,
}
// We are in write lock here, so sure we aren't killing
- Helpers::upsert(txn, ns, updatedDoc, true);
+ Helpers::upsert(opCtx, ns, updatedDoc, true);
- *lastOpApplied = repl::ReplClientInfo::forClient(txn->getClient()).getLastOp();
+ *lastOpApplied = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
didAnything = true;
}
}
@@ -961,13 +961,13 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* txn,
return didAnything;
}
-bool MigrationDestinationManager::_flushPendingWrites(OperationContext* txn,
+bool MigrationDestinationManager::_flushPendingWrites(OperationContext* opCtx,
const std::string& ns,
BSONObj min,
BSONObj max,
const repl::OpTime& lastOpApplied,
const WriteConcernOptions& writeConcern) {
- if (!opReplicatedEnough(txn, lastOpApplied, writeConcern)) {
+ if (!opReplicatedEnough(opCtx, lastOpApplied, writeConcern)) {
repl::OpTime op(lastOpApplied);
OCCASIONALLY log() << "migrate commit waiting for a majority of slaves for '" << ns << "' "
<< redact(min) << " -> " << redact(max) << " waiting for: " << op;
@@ -979,11 +979,11 @@ bool MigrationDestinationManager::_flushPendingWrites(OperationContext* txn,
{
// Get global lock to wait for write to be commited to journal.
- ScopedTransaction scopedXact(txn, MODE_S);
- Lock::GlobalRead lk(txn->lockState());
+ ScopedTransaction scopedXact(opCtx, MODE_S);
+ Lock::GlobalRead lk(opCtx->lockState());
// if durability is on, force a write to journal
- if (getDur().commitNow(txn)) {
+ if (getDur().commitNow(opCtx)) {
log() << "migrate commit flushed to journal for '" << ns << "' " << redact(min)
<< " -> " << redact(max);
}
@@ -992,15 +992,15 @@ bool MigrationDestinationManager::_flushPendingWrites(OperationContext* txn,
return true;
}
-Status MigrationDestinationManager::_notePending(OperationContext* txn,
+Status MigrationDestinationManager::_notePending(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& min,
const BSONObj& max,
const OID& epoch) {
- ScopedTransaction scopedXact(txn, MODE_IX);
- AutoGetCollection autoColl(txn, nss, MODE_IX, MODE_X);
+ ScopedTransaction scopedXact(opCtx, MODE_IX);
+ AutoGetCollection autoColl(opCtx, nss, MODE_IX, MODE_X);
- auto css = CollectionShardingState::get(txn, nss);
+ auto css = CollectionShardingState::get(opCtx, nss);
auto metadata = css->getMetadata();
// This can currently happen because drops aren't synchronized with in-migrations. The idea
@@ -1026,7 +1026,7 @@ Status MigrationDestinationManager::_notePending(OperationContext* txn,
return Status::OK();
}
-Status MigrationDestinationManager::_forgetPending(OperationContext* txn,
+Status MigrationDestinationManager::_forgetPending(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& min,
const BSONObj& max,
@@ -1040,10 +1040,10 @@ Status MigrationDestinationManager::_forgetPending(OperationContext* txn,
_chunkMarkedPending = false;
}
- ScopedTransaction scopedXact(txn, MODE_IX);
- AutoGetCollection autoColl(txn, nss, MODE_IX, MODE_X);
+ ScopedTransaction scopedXact(opCtx, MODE_IX);
+ AutoGetCollection autoColl(opCtx, nss, MODE_IX, MODE_X);
- auto css = CollectionShardingState::get(txn, nss);
+ auto css = CollectionShardingState::get(opCtx, nss);
auto metadata = css->getMetadata();
// This can currently happen because drops aren't synchronized with in-migrations. The idea
diff --git a/src/mongo/db/s/migration_destination_manager.h b/src/mongo/db/s/migration_destination_manager.h
index 4106029c0f2..700e9284159 100644
--- a/src/mongo/db/s/migration_destination_manager.h
+++ b/src/mongo/db/s/migration_destination_manager.h
@@ -125,7 +125,7 @@ private:
OID epoch,
WriteConcernOptions writeConcern);
- void _migrateDriver(OperationContext* txn,
+ void _migrateDriver(OperationContext* opCtx,
const BSONObj& min,
const BSONObj& max,
const BSONObj& shardKeyPattern,
@@ -133,7 +133,7 @@ private:
const OID& epoch,
const WriteConcernOptions& writeConcern);
- bool _applyMigrateOp(OperationContext* txn,
+ bool _applyMigrateOp(OperationContext* opCtx,
const std::string& ns,
const BSONObj& min,
const BSONObj& max,
@@ -141,7 +141,7 @@ private:
const BSONObj& xfer,
repl::OpTime* lastOpApplied);
- bool _flushPendingWrites(OperationContext* txn,
+ bool _flushPendingWrites(OperationContext* opCtx,
const std::string& ns,
BSONObj min,
BSONObj max,
@@ -158,7 +158,7 @@ private:
* TODO: Because migrations may currently be active when a collection drops, an epoch is
* necessary to ensure the pending metadata change is still applicable.
*/
- Status _notePending(OperationContext* txn,
+ Status _notePending(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& min,
const BSONObj& max,
@@ -174,7 +174,7 @@ private:
* TODO: Because migrations may currently be active when a collection drops, an epoch is
* necessary to ensure the pending metadata change is still applicable.
*/
- Status _forgetPending(OperationContext* txn,
+ Status _forgetPending(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& min,
const BSONObj& max,
diff --git a/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp b/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
index 3d684fda290..d3d54f6710c 100644
--- a/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
+++ b/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
@@ -85,13 +85,13 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
string& errmsg,
BSONObjBuilder& result) {
- auto shardingState = ShardingState::get(txn);
+ auto shardingState = ShardingState::get(opCtx);
uassertStatusOK(shardingState->canAcceptShardedCommands());
const ShardId toShard(cmdObj["toShardName"].String());
@@ -106,7 +106,7 @@ public:
// consistent and predictable, generally we'd refresh anyway, and to be paranoid.
ChunkVersion currentVersion;
- Status status = shardingState->refreshMetadataNow(txn, nss, &currentVersion);
+ Status status = shardingState->refreshMetadataNow(opCtx, nss, &currentVersion);
if (!status.isOK()) {
errmsg = str::stream() << "cannot start receiving chunk "
<< redact(chunkRange.toString()) << causedBy(redact(status));
@@ -118,7 +118,7 @@ public:
const auto secondaryThrottle =
uassertStatusOK(MigrationSecondaryThrottleOptions::createFromCommand(cmdObj));
const auto writeConcern = uassertStatusOK(
- ChunkMoveWriteConcernOptions::getEffectiveWriteConcern(txn, secondaryThrottle));
+ ChunkMoveWriteConcernOptions::getEffectiveWriteConcern(opCtx, secondaryThrottle));
BSONObj shardKeyPattern = cmdObj["shardKeyPattern"].Obj().getOwned();
@@ -199,13 +199,13 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
string& errmsg,
BSONObjBuilder& result) {
- ShardingState::get(txn)->migrationDestinationManager()->report(result);
+ ShardingState::get(opCtx)->migrationDestinationManager()->report(result);
return true;
}
@@ -240,14 +240,14 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
string& errmsg,
BSONObjBuilder& result) {
auto const sessionId = uassertStatusOK(MigrationSessionId::extractFromBSON(cmdObj));
- auto mdm = ShardingState::get(txn)->migrationDestinationManager();
+ auto mdm = ShardingState::get(opCtx)->migrationDestinationManager();
Status const status = mdm->startCommit(sessionId);
mdm->report(result);
if (!status.isOK()) {
@@ -288,13 +288,13 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string&,
BSONObj& cmdObj,
int,
string& errmsg,
BSONObjBuilder& result) {
- auto const mdm = ShardingState::get(txn)->migrationDestinationManager();
+ auto const mdm = ShardingState::get(opCtx)->migrationDestinationManager();
auto migrationSessionIdStatus(MigrationSessionId::extractFromBSON(cmdObj));
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 1de1af92316..5fb64445a75 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -76,7 +76,7 @@ MONGO_FP_DECLARE(migrationCommitNetworkError);
MONGO_FP_DECLARE(failMigrationCommit);
MONGO_FP_DECLARE(hangBeforeLeavingCriticalSection);
-MigrationSourceManager::MigrationSourceManager(OperationContext* txn,
+MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
MoveChunkRequest request,
ConnectionString donorConnStr,
HostAndPort recipientHost)
@@ -84,7 +84,7 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* txn,
_donorConnStr(std::move(donorConnStr)),
_recipientHost(std::move(recipientHost)),
_startTime() {
- invariant(!txn->lockState()->isLocked());
+ invariant(!opCtx->lockState()->isLocked());
// Disallow moving a chunk to ourselves
uassert(ErrorCodes::InvalidOptions,
@@ -95,11 +95,11 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* txn,
<< " with expected collection version epoch" << _args.getVersionEpoch();
// Now that the collection is locked, snapshot the metadata and fetch the latest versions
- ShardingState* const shardingState = ShardingState::get(txn);
+ ShardingState* const shardingState = ShardingState::get(opCtx);
ChunkVersion shardVersion;
- Status refreshStatus = shardingState->refreshMetadataNow(txn, getNss(), &shardVersion);
+ Status refreshStatus = shardingState->refreshMetadataNow(opCtx, getNss(), &shardVersion);
if (!refreshStatus.isOK()) {
uasserted(refreshStatus.code(),
str::stream() << "cannot start migrate of chunk " << _args.toString()
@@ -117,10 +117,10 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* txn,
// Snapshot the committed metadata from the time the migration starts
{
- ScopedTransaction scopedXact(txn, MODE_IS);
- AutoGetCollection autoColl(txn, getNss(), MODE_IS);
+ ScopedTransaction scopedXact(opCtx, MODE_IS);
+ AutoGetCollection autoColl(opCtx, getNss(), MODE_IS);
- _collectionMetadata = CollectionShardingState::get(txn, getNss())->getMetadata();
+ _collectionMetadata = CollectionShardingState::get(opCtx, getNss())->getMetadata();
_keyPattern = _collectionMetadata->getKeyPattern();
}
@@ -163,34 +163,34 @@ NamespaceString MigrationSourceManager::getNss() const {
return _args.getNss();
}
-Status MigrationSourceManager::startClone(OperationContext* txn) {
- invariant(!txn->lockState()->isLocked());
+Status MigrationSourceManager::startClone(OperationContext* opCtx) {
+ invariant(!opCtx->lockState()->isLocked());
invariant(_state == kCreated);
- auto scopedGuard = MakeGuard([&] { cleanupOnError(txn); });
-
- grid.catalogClient(txn)->logChange(txn,
- "moveChunk.start",
- getNss().ns(),
- BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey()
- << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()),
- ShardingCatalogClient::kMajorityWriteConcern);
+ auto scopedGuard = MakeGuard([&] { cleanupOnError(opCtx); });
+
+ grid.catalogClient(opCtx)->logChange(
+ opCtx,
+ "moveChunk.start",
+ getNss().ns(),
+ BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
+ << _args.getFromShardId()
+ << "to"
+ << _args.getToShardId()),
+ ShardingCatalogClient::kMajorityWriteConcern);
_cloneDriver = stdx::make_unique<MigrationChunkClonerSourceLegacy>(
_args, _collectionMetadata->getKeyPattern(), _donorConnStr, _recipientHost);
{
// Register for notifications from the replication subsystem
- ScopedTransaction scopedXact(txn, MODE_IX);
- AutoGetCollection autoColl(txn, getNss(), MODE_IX, MODE_X);
+ ScopedTransaction scopedXact(opCtx, MODE_IX);
+ AutoGetCollection autoColl(opCtx, getNss(), MODE_IX, MODE_X);
- auto css = CollectionShardingState::get(txn, getNss().ns());
- css->setMigrationSourceManager(txn, this);
+ auto css = CollectionShardingState::get(opCtx, getNss().ns());
+ css->setMigrationSourceManager(opCtx, this);
}
- Status startCloneStatus = _cloneDriver->startClone(txn);
+ Status startCloneStatus = _cloneDriver->startClone(opCtx);
if (!startCloneStatus.isOK()) {
return startCloneStatus;
}
@@ -200,14 +200,14 @@ Status MigrationSourceManager::startClone(OperationContext* txn) {
return Status::OK();
}
-Status MigrationSourceManager::awaitToCatchUp(OperationContext* txn) {
- invariant(!txn->lockState()->isLocked());
+Status MigrationSourceManager::awaitToCatchUp(OperationContext* opCtx) {
+ invariant(!opCtx->lockState()->isLocked());
invariant(_state == kCloning);
- auto scopedGuard = MakeGuard([&] { cleanupOnError(txn); });
+ auto scopedGuard = MakeGuard([&] { cleanupOnError(opCtx); });
// Block until the cloner deems it appropriate to enter the critical section.
Status catchUpStatus = _cloneDriver->awaitUntilCriticalSectionIsAppropriate(
- txn, kMaxWaitToEnterCriticalSectionTimeout);
+ opCtx, kMaxWaitToEnterCriticalSectionTimeout);
if (!catchUpStatus.isOK()) {
return catchUpStatus;
}
@@ -217,13 +217,13 @@ Status MigrationSourceManager::awaitToCatchUp(OperationContext* txn) {
return Status::OK();
}
-Status MigrationSourceManager::enterCriticalSection(OperationContext* txn) {
- invariant(!txn->lockState()->isLocked());
+Status MigrationSourceManager::enterCriticalSection(OperationContext* opCtx) {
+ invariant(!opCtx->lockState()->isLocked());
invariant(_state == kCloneCaughtUp);
- auto scopedGuard = MakeGuard([&] { cleanupOnError(txn); });
+ auto scopedGuard = MakeGuard([&] { cleanupOnError(opCtx); });
// Mark the shard as running critical operation, which requires recovery on crash
- Status status = ShardingStateRecovery::startMetadataOp(txn);
+ Status status = ShardingStateRecovery::startMetadataOp(opCtx);
if (!status.isOK()) {
return status;
}
@@ -232,11 +232,11 @@ Status MigrationSourceManager::enterCriticalSection(OperationContext* txn) {
// The critical section must be entered with collection X lock in order to ensure there are
// no writes which could have entered and passed the version check just before we entered
// the crticial section, but managed to complete after we left it.
- ScopedTransaction scopedXact(txn, MODE_IX);
- AutoGetCollection autoColl(txn, getNss(), MODE_IX, MODE_X);
+ ScopedTransaction scopedXact(opCtx, MODE_IX);
+ AutoGetCollection autoColl(opCtx, getNss(), MODE_IX, MODE_X);
// Check that the collection has not been dropped or recreated since the migration began.
- auto css = CollectionShardingState::get(txn, getNss().ns());
+ auto css = CollectionShardingState::get(opCtx, getNss().ns());
auto metadata = css->getMetadata();
if (!metadata ||
(metadata->getCollVersion().epoch() != _collectionMetadata->getCollVersion().epoch())) {
@@ -261,13 +261,13 @@ Status MigrationSourceManager::enterCriticalSection(OperationContext* txn) {
return Status::OK();
}
-Status MigrationSourceManager::commitChunkOnRecipient(OperationContext* txn) {
- invariant(!txn->lockState()->isLocked());
+Status MigrationSourceManager::commitChunkOnRecipient(OperationContext* opCtx) {
+ invariant(!opCtx->lockState()->isLocked());
invariant(_state == kCriticalSection);
- auto scopedGuard = MakeGuard([&] { cleanupOnError(txn); });
+ auto scopedGuard = MakeGuard([&] { cleanupOnError(opCtx); });
// Tell the recipient shard to fetch the latest changes.
- Status commitCloneStatus = _cloneDriver->commitClone(txn);
+ Status commitCloneStatus = _cloneDriver->commitClone(opCtx);
if (MONGO_FAIL_POINT(failMigrationCommit) && commitCloneStatus.isOK()) {
commitCloneStatus = {ErrorCodes::InternalError,
@@ -284,10 +284,10 @@ Status MigrationSourceManager::commitChunkOnRecipient(OperationContext* txn) {
return Status::OK();
}
-Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* txn) {
- invariant(!txn->lockState()->isLocked());
+Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opCtx) {
+ invariant(!opCtx->lockState()->isLocked());
invariant(_state == kCloneCompleted);
- auto scopedGuard = MakeGuard([&] { cleanupOnError(txn); });
+ auto scopedGuard = MakeGuard([&] { cleanupOnError(opCtx); });
ChunkType migratedChunkType;
migratedChunkType.setMin(_args.getMinKey());
@@ -319,7 +319,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* txn
auto commitChunkMigrationResponse =
grid.shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"admin",
builder.obj(),
@@ -342,8 +342,8 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* txn
"against the config server to obtain its latest optime"
<< causedBy(redact(migrationCommitStatus));
- Status status = grid.catalogClient(txn)->logChange(
- txn,
+ Status status = grid.catalogClient(opCtx)->logChange(
+ opCtx,
"moveChunk.validating",
getNss().ns(),
BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
@@ -376,13 +376,13 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* txn
// up so that subsequent requests will try to do a full refresh.
ChunkVersion unusedShardVersion;
Status refreshStatus =
- ShardingState::get(txn)->refreshMetadataNow(txn, getNss(), &unusedShardVersion);
+ ShardingState::get(opCtx)->refreshMetadataNow(opCtx, getNss(), &unusedShardVersion);
if (refreshStatus.isOK()) {
- ScopedTransaction scopedXact(txn, MODE_IS);
- AutoGetCollection autoColl(txn, getNss(), MODE_IS);
+ ScopedTransaction scopedXact(opCtx, MODE_IS);
+ AutoGetCollection autoColl(opCtx, getNss(), MODE_IS);
- auto refreshedMetadata = CollectionShardingState::get(txn, getNss())->getMetadata();
+ auto refreshedMetadata = CollectionShardingState::get(opCtx, getNss())->getMetadata();
if (!refreshedMetadata) {
return {ErrorCodes::NamespaceNotSharded,
@@ -402,10 +402,10 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* txn
log() << "Migration succeeded and updated collection version to "
<< refreshedMetadata->getCollVersion();
} else {
- ScopedTransaction scopedXact(txn, MODE_IX);
- AutoGetCollection autoColl(txn, getNss(), MODE_IX, MODE_X);
+ ScopedTransaction scopedXact(opCtx, MODE_IX);
+ AutoGetCollection autoColl(opCtx, getNss(), MODE_IX, MODE_X);
- CollectionShardingState::get(txn, getNss())->refreshMetadata(txn, nullptr);
+ CollectionShardingState::get(opCtx, getNss())->refreshMetadata(opCtx, nullptr);
log() << "Failed to refresh metadata after a failed commit attempt. Metadata was cleared "
"so it will get a full refresh when accessed again"
@@ -420,52 +420,52 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* txn
MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangBeforeLeavingCriticalSection);
scopedGuard.Dismiss();
- _cleanup(txn);
-
- grid.catalogClient(txn)->logChange(txn,
- "moveChunk.commit",
- getNss().ns(),
- BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey()
- << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()),
- ShardingCatalogClient::kMajorityWriteConcern);
+ _cleanup(opCtx);
+
+ grid.catalogClient(opCtx)->logChange(
+ opCtx,
+ "moveChunk.commit",
+ getNss().ns(),
+ BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
+ << _args.getFromShardId()
+ << "to"
+ << _args.getToShardId()),
+ ShardingCatalogClient::kMajorityWriteConcern);
return Status::OK();
}
-void MigrationSourceManager::cleanupOnError(OperationContext* txn) {
+void MigrationSourceManager::cleanupOnError(OperationContext* opCtx) {
if (_state == kDone) {
return;
}
- grid.catalogClient(txn)->logChange(txn,
- "moveChunk.error",
- getNss().ns(),
- BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey()
- << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()),
- ShardingCatalogClient::kMajorityWriteConcern);
-
- _cleanup(txn);
+ grid.catalogClient(opCtx)->logChange(
+ opCtx,
+ "moveChunk.error",
+ getNss().ns(),
+ BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
+ << _args.getFromShardId()
+ << "to"
+ << _args.getToShardId()),
+ ShardingCatalogClient::kMajorityWriteConcern);
+
+ _cleanup(opCtx);
}
-void MigrationSourceManager::_cleanup(OperationContext* txn) {
+void MigrationSourceManager::_cleanup(OperationContext* opCtx) {
invariant(_state != kDone);
auto cloneDriver = [&]() {
// Unregister from the collection's sharding state
- ScopedTransaction scopedXact(txn, MODE_IX);
- AutoGetCollection autoColl(txn, getNss(), MODE_IX, MODE_X);
+ ScopedTransaction scopedXact(opCtx, MODE_IX);
+ AutoGetCollection autoColl(opCtx, getNss(), MODE_IX, MODE_X);
- auto css = CollectionShardingState::get(txn, getNss().ns());
+ auto css = CollectionShardingState::get(opCtx, getNss().ns());
// The migration source manager is not visible anymore after it is unregistered from the
// collection
- css->clearMigrationSourceManager(txn);
+ css->clearMigrationSourceManager(opCtx);
// Leave the critical section.
if (_critSecSignal) {
@@ -478,11 +478,11 @@ void MigrationSourceManager::_cleanup(OperationContext* txn) {
// Decrement the metadata op counter outside of the collection lock in order to hold it for as
// short as possible.
if (_state == kCriticalSection || _state == kCloneCompleted) {
- ShardingStateRecovery::endMetadataOp(txn);
+ ShardingStateRecovery::endMetadataOp(opCtx);
}
if (cloneDriver) {
- cloneDriver->cancelClone(txn);
+ cloneDriver->cancelClone(opCtx);
}
_state = kDone;
diff --git a/src/mongo/db/s/migration_source_manager.h b/src/mongo/db/s/migration_source_manager.h
index cb5ce4be792..c0822ca798a 100644
--- a/src/mongo/db/s/migration_source_manager.h
+++ b/src/mongo/db/s/migration_source_manager.h
@@ -83,7 +83,7 @@ public:
* - SendStaleConfigException if the expected collection version does not match what we find it
* to be after acquiring the distributed lock.
*/
- MigrationSourceManager(OperationContext* txn,
+ MigrationSourceManager(OperationContext* opCtx,
MoveChunkRequest request,
ConnectionString donorConnStr,
HostAndPort recipientHost);
@@ -101,7 +101,7 @@ public:
* Expected state: kCreated
* Resulting state: kCloning on success, kDone on failure
*/
- Status startClone(OperationContext* txn);
+ Status startClone(OperationContext* opCtx);
/**
* Waits for the cloning to catch up sufficiently so we won't have to stay in the critical
@@ -111,7 +111,7 @@ public:
* Expected state: kCloning
* Resulting state: kCloneCaughtUp on success, kDone on failure
*/
- Status awaitToCatchUp(OperationContext* txn);
+ Status awaitToCatchUp(OperationContext* opCtx);
/**
* Waits for the active clone operation to catch up and enters critical section. Once this call
@@ -122,7 +122,7 @@ public:
* Expected state: kCloneCaughtUp
* Resulting state: kCriticalSection on success, kDone on failure
*/
- Status enterCriticalSection(OperationContext* txn);
+ Status enterCriticalSection(OperationContext* opCtx);
/**
* Tells the recipient of the chunk to commit the chunk contents, which it received.
@@ -130,7 +130,7 @@ public:
* Expected state: kCriticalSection
* Resulting state: kCloneCompleted on success, kDone on failure
*/
- Status commitChunkOnRecipient(OperationContext* txn);
+ Status commitChunkOnRecipient(OperationContext* opCtx);
/**
* Tells the recipient shard to fetch the latest portion of data from the donor and to commit it
@@ -144,7 +144,7 @@ public:
* Expected state: kCloneCompleted
* Resulting state: kDone
*/
- Status commitChunkMetadataOnConfig(OperationContext* txn);
+ Status commitChunkMetadataOnConfig(OperationContext* opCtx);
/**
* May be called at any time. Unregisters the migration source manager from the collection,
@@ -154,7 +154,7 @@ public:
* Expected state: Any
* Resulting state: kDone
*/
- void cleanupOnError(OperationContext* txn);
+ void cleanupOnError(OperationContext* opCtx);
/**
* Returns the key pattern object for the stored committed metadata.
@@ -200,7 +200,7 @@ private:
* Called when any of the states fails. May only be called once and will put the migration
* manager into the kDone state.
*/
- void _cleanup(OperationContext* txn);
+ void _cleanup(OperationContext* opCtx);
// The parameters to the moveChunk command
const MoveChunkRequest _args;
diff --git a/src/mongo/db/s/move_chunk_command.cpp b/src/mongo/db/s/move_chunk_command.cpp
index 03a03c285bb..08ea3723920 100644
--- a/src/mongo/db/s/move_chunk_command.cpp
+++ b/src/mongo/db/s/move_chunk_command.cpp
@@ -110,13 +110,13 @@ public:
return parseNsFullyQualified(dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) override {
- auto shardingState = ShardingState::get(txn);
+ auto shardingState = ShardingState::get(opCtx);
uassertStatusOK(shardingState->canAcceptShardedCommands());
const MoveChunkRequest moveChunkRequest = uassertStatusOK(
@@ -124,7 +124,7 @@ public:
// Make sure we're as up-to-date as possible with shard information. This catches the case
// where we might have changed a shard's host by removing/adding a shard with the same name.
- grid.shardRegistry()->reload(txn);
+ grid.shardRegistry()->reload(opCtx);
auto scopedRegisterMigration =
uassertStatusOK(shardingState->registerDonateChunk(moveChunkRequest));
@@ -134,7 +134,7 @@ public:
// Check if there is an existing migration running and if so, join it
if (scopedRegisterMigration.mustExecute()) {
try {
- _runImpl(txn, moveChunkRequest);
+ _runImpl(opCtx, moveChunkRequest);
status = Status::OK();
} catch (const DBException& e) {
status = e.toStatus();
@@ -148,7 +148,7 @@ public:
scopedRegisterMigration.complete(status);
} else {
- status = scopedRegisterMigration.waitForCompletion(txn);
+ status = scopedRegisterMigration.waitForCompletion(opCtx);
}
if (status == ErrorCodes::ChunkTooBig) {
@@ -165,27 +165,27 @@ public:
}
private:
- static void _runImpl(OperationContext* txn, const MoveChunkRequest& moveChunkRequest) {
+ static void _runImpl(OperationContext* opCtx, const MoveChunkRequest& moveChunkRequest) {
const auto writeConcernForRangeDeleter =
uassertStatusOK(ChunkMoveWriteConcernOptions::getEffectiveWriteConcern(
- txn, moveChunkRequest.getSecondaryThrottle()));
+ opCtx, moveChunkRequest.getSecondaryThrottle()));
// Resolve the donor and recipient shards and their connection string
- auto const shardRegistry = Grid::get(txn)->shardRegistry();
+ auto const shardRegistry = Grid::get(opCtx)->shardRegistry();
const auto donorConnStr =
- uassertStatusOK(shardRegistry->getShard(txn, moveChunkRequest.getFromShardId()))
+ uassertStatusOK(shardRegistry->getShard(opCtx, moveChunkRequest.getFromShardId()))
->getConnString();
const auto recipientHost = uassertStatusOK([&] {
auto recipientShard =
- uassertStatusOK(shardRegistry->getShard(txn, moveChunkRequest.getToShardId()));
+ uassertStatusOK(shardRegistry->getShard(opCtx, moveChunkRequest.getToShardId()));
return recipientShard->getTargeter()->findHostNoWait(
ReadPreferenceSetting{ReadPreference::PrimaryOnly});
}());
string unusedErrMsg;
- MoveTimingHelper moveTimingHelper(txn,
+ MoveTimingHelper moveTimingHelper(opCtx,
"from",
moveChunkRequest.getNss().ns(),
moveChunkRequest.getMinKey(),
@@ -202,27 +202,27 @@ private:
{
MigrationSourceManager migrationSourceManager(
- txn, moveChunkRequest, donorConnStr, recipientHost);
+ opCtx, moveChunkRequest, donorConnStr, recipientHost);
shardKeyPattern = migrationSourceManager.getKeyPattern().getOwned();
moveTimingHelper.done(2);
MONGO_FAIL_POINT_PAUSE_WHILE_SET(moveChunkHangAtStep2);
- uassertStatusOKWithWarning(migrationSourceManager.startClone(txn));
+ uassertStatusOKWithWarning(migrationSourceManager.startClone(opCtx));
moveTimingHelper.done(3);
MONGO_FAIL_POINT_PAUSE_WHILE_SET(moveChunkHangAtStep3);
- uassertStatusOKWithWarning(migrationSourceManager.awaitToCatchUp(txn));
+ uassertStatusOKWithWarning(migrationSourceManager.awaitToCatchUp(opCtx));
moveTimingHelper.done(4);
MONGO_FAIL_POINT_PAUSE_WHILE_SET(moveChunkHangAtStep4);
- uassertStatusOKWithWarning(migrationSourceManager.enterCriticalSection(txn));
- uassertStatusOKWithWarning(migrationSourceManager.commitChunkOnRecipient(txn));
+ uassertStatusOKWithWarning(migrationSourceManager.enterCriticalSection(opCtx));
+ uassertStatusOKWithWarning(migrationSourceManager.commitChunkOnRecipient(opCtx));
moveTimingHelper.done(5);
MONGO_FAIL_POINT_PAUSE_WHILE_SET(moveChunkHangAtStep5);
- uassertStatusOKWithWarning(migrationSourceManager.commitChunkMetadataOnConfig(txn));
+ uassertStatusOKWithWarning(migrationSourceManager.commitChunkMetadataOnConfig(opCtx));
moveTimingHelper.done(6);
MONGO_FAIL_POINT_PAUSE_WHILE_SET(moveChunkHangAtStep6);
}
@@ -245,14 +245,14 @@ private:
// This is an immediate delete, and as a consequence, there could be more
// deletes happening simultaneously than there are deleter worker threads.
- if (!getDeleter()->deleteNow(txn, deleterOptions, &errMsg)) {
+ if (!getDeleter()->deleteNow(opCtx, deleterOptions, &errMsg)) {
log() << "Error occured while performing cleanup: " << redact(errMsg);
}
} else {
log() << "forking for cleanup of chunk data";
string errMsg;
- if (!getDeleter()->queueDelete(txn,
+ if (!getDeleter()->queueDelete(opCtx,
deleterOptions,
NULL, // Don't want to be notified
&errMsg)) {
diff --git a/src/mongo/db/s/move_timing_helper.cpp b/src/mongo/db/s/move_timing_helper.cpp
index 222a5383002..89c305cda43 100644
--- a/src/mongo/db/s/move_timing_helper.cpp
+++ b/src/mongo/db/s/move_timing_helper.cpp
@@ -39,7 +39,7 @@
namespace mongo {
-MoveTimingHelper::MoveTimingHelper(OperationContext* txn,
+MoveTimingHelper::MoveTimingHelper(OperationContext* opCtx,
const std::string& where,
const std::string& ns,
const BSONObj& min,
@@ -48,7 +48,7 @@ MoveTimingHelper::MoveTimingHelper(OperationContext* txn,
std::string* cmdErrmsg,
const ShardId& toShard,
const ShardId& fromShard)
- : _txn(txn),
+ : _opCtx(opCtx),
_where(where),
_ns(ns),
_to(toShard),
@@ -82,11 +82,11 @@ MoveTimingHelper::~MoveTimingHelper() {
_b.append("errmsg", *_cmdErrmsg);
}
- grid.catalogClient(_txn)->logChange(_txn,
- str::stream() << "moveChunk." << _where,
- _ns,
- _b.obj(),
- ShardingCatalogClient::kMajorityWriteConcern);
+ grid.catalogClient(_opCtx)->logChange(_opCtx,
+ str::stream() << "moveChunk." << _where,
+ _ns,
+ _b.obj(),
+ ShardingCatalogClient::kMajorityWriteConcern);
} catch (const std::exception& e) {
warning() << "couldn't record timing for moveChunk '" << _where
<< "': " << redact(e.what());
@@ -99,10 +99,10 @@ void MoveTimingHelper::done(int step) {
const std::string s = str::stream() << "step " << step << " of " << _totalNumSteps;
- CurOp* op = CurOp::get(_txn);
+ CurOp* op = CurOp::get(_opCtx);
{
- stdx::lock_guard<Client> lk(*_txn->getClient());
+ stdx::lock_guard<Client> lk(*_opCtx->getClient());
op->setMessage_inlock(s.c_str());
}
diff --git a/src/mongo/db/s/move_timing_helper.h b/src/mongo/db/s/move_timing_helper.h
index bc1f2644ac7..eb8194f1ae6 100644
--- a/src/mongo/db/s/move_timing_helper.h
+++ b/src/mongo/db/s/move_timing_helper.h
@@ -41,7 +41,7 @@ class OperationContext;
class MoveTimingHelper {
public:
- MoveTimingHelper(OperationContext* txn,
+ MoveTimingHelper(OperationContext* opCtx,
const std::string& where,
const std::string& ns,
const BSONObj& min,
@@ -58,7 +58,7 @@ private:
// Measures how long the receiving of a chunk takes
Timer _t;
- OperationContext* const _txn;
+ OperationContext* const _opCtx;
const std::string _where;
const std::string _ns;
const ShardId _to;
diff --git a/src/mongo/db/s/operation_sharding_state.cpp b/src/mongo/db/s/operation_sharding_state.cpp
index 0f92bbd5492..fc13bf41287 100644
--- a/src/mongo/db/s/operation_sharding_state.cpp
+++ b/src/mongo/db/s/operation_sharding_state.cpp
@@ -46,8 +46,8 @@ const Microseconds kMaxWaitForMigrationCriticalSection = Minutes(5);
OperationShardingState::OperationShardingState() = default;
-OperationShardingState& OperationShardingState::get(OperationContext* txn) {
- return shardingMetadataDecoration(txn);
+OperationShardingState& OperationShardingState::get(OperationContext* opCtx) {
+ return shardingMetadataDecoration(opCtx);
}
void OperationShardingState::initializeShardVersion(NamespaceString nss,
@@ -101,15 +101,15 @@ void OperationShardingState::unsetShardVersion(NamespaceString nss) {
_clear();
}
-bool OperationShardingState::waitForMigrationCriticalSectionSignal(OperationContext* txn) {
+bool OperationShardingState::waitForMigrationCriticalSectionSignal(OperationContext* opCtx) {
// Must not block while holding a lock
- invariant(!txn->lockState()->isLocked());
+ invariant(!opCtx->lockState()->isLocked());
if (_migrationCriticalSectionSignal) {
_migrationCriticalSectionSignal->waitFor(
- txn,
- txn->hasDeadline()
- ? std::min(txn->getRemainingMaxTimeMicros(), kMaxWaitForMigrationCriticalSection)
+ opCtx,
+ opCtx->hasDeadline()
+ ? std::min(opCtx->getRemainingMaxTimeMicros(), kMaxWaitForMigrationCriticalSection)
: kMaxWaitForMigrationCriticalSection);
_migrationCriticalSectionSignal = nullptr;
return true;
@@ -130,10 +130,10 @@ void OperationShardingState::_clear() {
_ns = NamespaceString();
}
-OperationShardingState::IgnoreVersioningBlock::IgnoreVersioningBlock(OperationContext* txn,
+OperationShardingState::IgnoreVersioningBlock::IgnoreVersioningBlock(OperationContext* opCtx,
const NamespaceString& ns)
- : _txn(txn), _ns(ns) {
- auto& oss = OperationShardingState::get(txn);
+ : _opCtx(opCtx), _ns(ns) {
+ auto& oss = OperationShardingState::get(opCtx);
_hadOriginalVersion = oss._hasVersion;
if (_hadOriginalVersion) {
_originalVersion = oss.getShardVersion(ns);
@@ -142,7 +142,7 @@ OperationShardingState::IgnoreVersioningBlock::IgnoreVersioningBlock(OperationCo
}
OperationShardingState::IgnoreVersioningBlock::~IgnoreVersioningBlock() {
- auto& oss = OperationShardingState::get(_txn);
+ auto& oss = OperationShardingState::get(_opCtx);
invariant(ChunkVersion::isIgnoredVersion(oss.getShardVersion(_ns)));
if (_hadOriginalVersion) {
oss.setShardVersion(_ns, _originalVersion);
diff --git a/src/mongo/db/s/operation_sharding_state.h b/src/mongo/db/s/operation_sharding_state.h
index aa03834da6a..d4a0e778af1 100644
--- a/src/mongo/db/s/operation_sharding_state.h
+++ b/src/mongo/db/s/operation_sharding_state.h
@@ -56,9 +56,9 @@ public:
OperationShardingState();
/**
- * Retrieves a reference to the shard version decorating the OperationContext, 'txn'.
+ * Retrieves a reference to the shard version decorating the OperationContext, 'opCtx'.
*/
- static OperationShardingState& get(OperationContext* txn);
+ static OperationShardingState& get(OperationContext* opCtx);
/**
* Parses shard version from the command parameters 'cmdObj' and stores the results in this
@@ -104,7 +104,7 @@ public:
* Returns true if the call actually waited because of migration critical section (regardless if
* whether it timed out or not), false if there was no active migration critical section.
*/
- bool waitForMigrationCriticalSectionSignal(OperationContext* txn);
+ bool waitForMigrationCriticalSectionSignal(OperationContext* opCtx);
/**
* Setting this value indicates that when the version check failed, there was an active
@@ -140,11 +140,11 @@ class OperationShardingState::IgnoreVersioningBlock {
MONGO_DISALLOW_COPYING(IgnoreVersioningBlock);
public:
- IgnoreVersioningBlock(OperationContext* txn, const NamespaceString& ns);
+ IgnoreVersioningBlock(OperationContext* opCtx, const NamespaceString& ns);
~IgnoreVersioningBlock();
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
NamespaceString _ns;
ChunkVersion _originalVersion;
bool _hadOriginalVersion;
diff --git a/src/mongo/db/s/set_shard_version_command.cpp b/src/mongo/db/s/set_shard_version_command.cpp
index d4cc01f1d06..a0ed2ca7e6b 100644
--- a/src/mongo/db/s/set_shard_version_command.cpp
+++ b/src/mongo/db/s/set_shard_version_command.cpp
@@ -88,13 +88,13 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string&,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- auto shardingState = ShardingState::get(txn);
+ auto shardingState = ShardingState::get(opCtx);
uassertStatusOK(shardingState->canAcceptShardedCommands());
// Steps
@@ -128,7 +128,7 @@ public:
// Step 1
- Client* client = txn->getClient();
+ Client* client = opCtx->getClient();
LastError::get(client).disable();
const bool authoritative = cmdObj.getBoolField("authoritative");
@@ -156,7 +156,7 @@ public:
// Validate shardName parameter.
string shardName = cmdObj["shard"].str();
- auto storedShardName = ShardingState::get(txn)->getShardName();
+ auto storedShardName = ShardingState::get(opCtx)->getShardName();
uassert(ErrorCodes::BadValue,
str::stream() << "received shardName " << shardName
<< " which differs from stored shardName "
@@ -180,7 +180,7 @@ public:
return false;
}
- ConnectionString storedConnStr = ShardingState::get(txn)->getConfigServer(txn);
+ ConnectionString storedConnStr = ShardingState::get(opCtx)->getConfigServer(opCtx);
if (givenConnStr.getSetName() != storedConnStr.getSetName()) {
errmsg = str::stream()
<< "given config server set name: " << givenConnStr.getSetName()
@@ -215,10 +215,10 @@ public:
{
boost::optional<AutoGetDb> autoDb;
- autoDb.emplace(txn, nss.db(), MODE_IS);
+ autoDb.emplace(opCtx, nss.db(), MODE_IS);
// we can run on a slave up to here
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(txn,
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(opCtx,
nss.db())) {
result.append("errmsg", "not master");
result.append("note", "from post init in setShardVersion");
@@ -227,14 +227,14 @@ public:
// Views do not require a shard version check.
if (autoDb->getDb() && !autoDb->getDb()->getCollection(nss.ns()) &&
- autoDb->getDb()->getViewCatalog()->lookup(txn, nss.ns())) {
+ autoDb->getDb()->getViewCatalog()->lookup(opCtx, nss.ns())) {
return true;
}
boost::optional<Lock::CollectionLock> collLock;
- collLock.emplace(txn->lockState(), nss.ns(), MODE_IS);
+ collLock.emplace(opCtx->lockState(), nss.ns(), MODE_IS);
- auto css = CollectionShardingState::get(txn, nss);
+ auto css = CollectionShardingState::get(opCtx, nss);
const ChunkVersion collectionShardVersion =
(css->getMetadata() ? css->getMetadata()->getShardVersion()
: ChunkVersion::UNSHARDED());
@@ -306,7 +306,7 @@ public:
collLock.reset();
autoDb.reset();
log() << "waiting till out of critical section";
- critSecSignal->waitFor(txn, Seconds(10));
+ critSecSignal->waitFor(opCtx, Seconds(10));
}
}
@@ -329,7 +329,7 @@ public:
collLock.reset();
autoDb.reset();
log() << "waiting till out of critical section";
- critSecSignal->waitFor(txn, Seconds(10));
+ critSecSignal->waitFor(opCtx, Seconds(10));
}
}
@@ -346,13 +346,13 @@ public:
// Step 7
- Status status = shardingState->onStaleShardVersion(txn, nss, requestedVersion);
+ Status status = shardingState->onStaleShardVersion(opCtx, nss, requestedVersion);
{
- AutoGetCollection autoColl(txn, nss, MODE_IS);
+ AutoGetCollection autoColl(opCtx, nss, MODE_IS);
ChunkVersion currVersion = ChunkVersion::UNSHARDED();
- auto collMetadata = CollectionShardingState::get(txn, nss)->getMetadata();
+ auto collMetadata = CollectionShardingState::get(opCtx, nss)->getMetadata();
if (collMetadata) {
currVersion = collMetadata->getShardVersion();
}
diff --git a/src/mongo/db/s/shard_identity_rollback_notifier.cpp b/src/mongo/db/s/shard_identity_rollback_notifier.cpp
index b78efaa6fef..118cdb038b6 100644
--- a/src/mongo/db/s/shard_identity_rollback_notifier.cpp
+++ b/src/mongo/db/s/shard_identity_rollback_notifier.cpp
@@ -40,12 +40,12 @@ const auto getRollbackNotifier = ServiceContext::declareDecoration<ShardIdentity
ShardIdentityRollbackNotifier::ShardIdentityRollbackNotifier() = default;
-ShardIdentityRollbackNotifier* ShardIdentityRollbackNotifier::get(OperationContext* txn) {
- return get(txn->getServiceContext());
+ShardIdentityRollbackNotifier* ShardIdentityRollbackNotifier::get(OperationContext* opCtx) {
+ return get(opCtx->getServiceContext());
}
-ShardIdentityRollbackNotifier* ShardIdentityRollbackNotifier::get(ServiceContext* txn) {
- return &getRollbackNotifier(txn);
+ShardIdentityRollbackNotifier* ShardIdentityRollbackNotifier::get(ServiceContext* opCtx) {
+ return &getRollbackNotifier(opCtx);
}
diff --git a/src/mongo/db/s/shard_identity_rollback_notifier.h b/src/mongo/db/s/shard_identity_rollback_notifier.h
index a8bb6592350..4ce184065a2 100644
--- a/src/mongo/db/s/shard_identity_rollback_notifier.h
+++ b/src/mongo/db/s/shard_identity_rollback_notifier.h
@@ -59,8 +59,8 @@ public:
/**
* Retrieves the ShardIdentityRollbackNotifier associated with the specified service context.
*/
- static ShardIdentityRollbackNotifier* get(OperationContext* txn);
- static ShardIdentityRollbackNotifier* get(ServiceContext* txn);
+ static ShardIdentityRollbackNotifier* get(OperationContext* opCtx);
+ static ShardIdentityRollbackNotifier* get(ServiceContext* opCtx);
/**
* Records the fact that the shardIdentity document was rolled back.
diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp
index ba292c269c2..b81af5a051a 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod.cpp
@@ -49,7 +49,7 @@
namespace mongo {
-Status initializeGlobalShardingStateForMongod(OperationContext* txn,
+Status initializeGlobalShardingStateForMongod(OperationContext* opCtx,
const ConnectionString& configCS,
StringData distLockProcessId) {
auto targeterFactory = stdx::make_unique<RemoteCommandTargeterFactoryImpl>();
@@ -82,7 +82,7 @@ Status initializeGlobalShardingStateForMongod(OperationContext* txn,
stdx::make_unique<ShardFactory>(std::move(buildersMap), std::move(targeterFactory));
return initializeGlobalShardingState(
- txn,
+ opCtx,
configCS,
distLockProcessId,
std::move(shardFactory),
diff --git a/src/mongo/db/s/sharding_initialization_mongod.h b/src/mongo/db/s/sharding_initialization_mongod.h
index faf24aededd..cf714002921 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.h
+++ b/src/mongo/db/s/sharding_initialization_mongod.h
@@ -42,7 +42,7 @@ class Status;
*
* NOTE: This does not initialize ShardingState, which should only be done for shard servers.
*/
-Status initializeGlobalShardingStateForMongod(OperationContext* txn,
+Status initializeGlobalShardingStateForMongod(OperationContext* opCtx,
const ConnectionString& configCS,
StringData distLockProcessId);
diff --git a/src/mongo/db/s/sharding_server_status.cpp b/src/mongo/db/s/sharding_server_status.cpp
index 8f58e24600b..80fd5f12566 100644
--- a/src/mongo/db/s/sharding_server_status.cpp
+++ b/src/mongo/db/s/sharding_server_status.cpp
@@ -45,21 +45,22 @@ public:
return true;
}
- BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const final {
+ BSONObj generateSection(OperationContext* opCtx, const BSONElement& configElement) const final {
BSONObjBuilder result;
- auto shardingState = ShardingState::get(txn);
+ auto shardingState = ShardingState::get(opCtx);
if (shardingState->enabled() &&
serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
result.append("configsvrConnectionString",
- shardingState->getConfigServer(txn).toString());
+ shardingState->getConfigServer(opCtx).toString());
- Grid::get(txn)->configOpTime().append(&result, "lastSeenConfigServerOpTime");
+ Grid::get(opCtx)->configOpTime().append(&result, "lastSeenConfigServerOpTime");
// Get a migration status report if a migration is active for which this is the source
// shard. ShardingState::getActiveMigrationStatusReport will take an IS lock on the
// namespace of the active migration if there is one that is active.
- BSONObj migrationStatus = ShardingState::get(txn)->getActiveMigrationStatusReport(txn);
+ BSONObj migrationStatus =
+ ShardingState::get(opCtx)->getActiveMigrationStatusReport(opCtx);
if (!migrationStatus.isEmpty()) {
result.append("migrations", migrationStatus);
}
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index 57fda772fb4..567877c216a 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -119,17 +119,17 @@ void updateShardIdentityConfigStringCB(const string& setName, const string& newC
}
}
-bool haveLocalShardingInfo(OperationContext* txn, const string& ns) {
- if (!ShardingState::get(txn)->enabled()) {
+bool haveLocalShardingInfo(OperationContext* opCtx, const string& ns) {
+ if (!ShardingState::get(opCtx)->enabled()) {
return false;
}
- const auto& oss = OperationShardingState::get(txn);
+ const auto& oss = OperationShardingState::get(opCtx);
if (oss.hasShardVersion()) {
return true;
}
- const auto& sci = ShardedConnectionInfo::get(txn->getClient(), false);
+ const auto& sci = ShardedConnectionInfo::get(opCtx->getClient(), false);
if (sci && !sci->getVersion(ns).isStrictlyEqualTo(ChunkVersion::UNSHARDED())) {
return true;
}
@@ -179,10 +179,10 @@ Status ShardingState::canAcceptShardedCommands() const {
}
}
-ConnectionString ShardingState::getConfigServer(OperationContext* txn) {
+ConnectionString ShardingState::getConfigServer(OperationContext* opCtx) {
invariant(enabled());
stdx::lock_guard<stdx::mutex> lk(_mutex);
- return Grid::get(txn)->shardRegistry()->getConfigServerConnectionString();
+ return Grid::get(opCtx)->shardRegistry()->getConfigServerConnectionString();
}
string ShardingState::getShardName() {
@@ -191,23 +191,23 @@ string ShardingState::getShardName() {
return _shardName;
}
-void ShardingState::shutDown(OperationContext* txn) {
+void ShardingState::shutDown(OperationContext* opCtx) {
stdx::unique_lock<stdx::mutex> lk(_mutex);
if (enabled()) {
grid.getExecutorPool()->shutdownAndJoin();
- grid.catalogClient(txn)->shutDown(txn);
+ grid.catalogClient(opCtx)->shutDown(opCtx);
}
}
-Status ShardingState::updateConfigServerOpTimeFromMetadata(OperationContext* txn) {
+Status ShardingState::updateConfigServerOpTimeFromMetadata(OperationContext* opCtx) {
if (!enabled()) {
// Nothing to do if sharding state has not been initialized.
return Status::OK();
}
- boost::optional<repl::OpTime> opTime = rpc::ConfigServerMetadata::get(txn).getOpTime();
+ boost::optional<repl::OpTime> opTime = rpc::ConfigServerMetadata::get(opCtx).getOpTime();
if (opTime) {
- if (!AuthorizationSession::get(txn->getClient())
+ if (!AuthorizationSession::get(opCtx->getClient())
->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(),
ActionType::internal)) {
return Status(ErrorCodes::Unauthorized, "Unauthorized to update config opTime");
@@ -219,14 +219,14 @@ Status ShardingState::updateConfigServerOpTimeFromMetadata(OperationContext* txn
return Status::OK();
}
-CollectionShardingState* ShardingState::getNS(const std::string& ns, OperationContext* txn) {
+CollectionShardingState* ShardingState::getNS(const std::string& ns, OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
CollectionShardingStateMap::iterator it = _collections.find(ns);
if (it == _collections.end()) {
auto inserted =
_collections.insert(make_pair(ns,
stdx::make_unique<CollectionShardingState>(
- txn->getServiceContext(), NamespaceString(ns))));
+ opCtx->getServiceContext(), NamespaceString(ns))));
invariant(inserted.second);
it = std::move(inserted.first);
}
@@ -254,18 +254,18 @@ void ShardingState::scheduleCleanup(const NamespaceString& nss) {
_scheduleWorkFn(nss);
}
-Status ShardingState::onStaleShardVersion(OperationContext* txn,
+Status ShardingState::onStaleShardVersion(OperationContext* opCtx,
const NamespaceString& nss,
const ChunkVersion& expectedVersion) {
- invariant(!txn->lockState()->isLocked());
+ invariant(!opCtx->lockState()->isLocked());
invariant(enabled());
LOG(2) << "metadata refresh requested for " << nss.ns() << " at shard version "
<< expectedVersion;
// Ensure any ongoing migrations have completed
- auto& oss = OperationShardingState::get(txn);
- oss.waitForMigrationCriticalSectionSignal(txn);
+ auto& oss = OperationShardingState::get(opCtx);
+ oss.waitForMigrationCriticalSectionSignal(opCtx);
ChunkVersion collectionShardVersion;
@@ -274,9 +274,9 @@ Status ShardingState::onStaleShardVersion(OperationContext* txn,
ScopedCollectionMetadata currentMetadata;
{
- AutoGetCollection autoColl(txn, nss, MODE_IS);
+ AutoGetCollection autoColl(opCtx, nss, MODE_IS);
- currentMetadata = CollectionShardingState::get(txn, nss)->getMetadata();
+ currentMetadata = CollectionShardingState::get(opCtx, nss)->getMetadata();
if (currentMetadata) {
collectionShardVersion = currentMetadata->getShardVersion();
}
@@ -290,23 +290,23 @@ Status ShardingState::onStaleShardVersion(OperationContext* txn,
}
auto refreshStatusAndVersion =
- _refreshMetadata(txn, nss, (currentMetadata ? currentMetadata.getMetadata() : nullptr));
+ _refreshMetadata(opCtx, nss, (currentMetadata ? currentMetadata.getMetadata() : nullptr));
return refreshStatusAndVersion.getStatus();
}
-Status ShardingState::refreshMetadataNow(OperationContext* txn,
+Status ShardingState::refreshMetadataNow(OperationContext* opCtx,
const NamespaceString& nss,
ChunkVersion* latestShardVersion) {
ScopedCollectionMetadata currentMetadata;
{
- AutoGetCollection autoColl(txn, nss, MODE_IS);
+ AutoGetCollection autoColl(opCtx, nss, MODE_IS);
- currentMetadata = CollectionShardingState::get(txn, nss)->getMetadata();
+ currentMetadata = CollectionShardingState::get(opCtx, nss)->getMetadata();
}
auto refreshLatestShardVersionStatus =
- _refreshMetadata(txn, nss, currentMetadata.getMetadata());
+ _refreshMetadata(opCtx, nss, currentMetadata.getMetadata());
if (!refreshLatestShardVersionStatus.isOK()) {
return refreshLatestShardVersionStatus.getStatus();
}
@@ -317,7 +317,7 @@ Status ShardingState::refreshMetadataNow(OperationContext* txn,
// NOTE: This method can be called inside a database lock so it should never take any database
// locks, perform I/O, or any long running operations.
-Status ShardingState::initializeFromShardIdentity(OperationContext* txn,
+Status ShardingState::initializeFromShardIdentity(OperationContext* opCtx,
const ShardIdentityType& shardIdentity) {
invariant(serverGlobalParams.clusterRole == ClusterRole::ShardServer);
@@ -360,7 +360,7 @@ Status ShardingState::initializeFromShardIdentity(OperationContext* txn,
ShardedConnectionInfo::addHook();
try {
- Status status = _globalInit(txn, configSvrConnStr, generateDistLockProcessId(txn));
+ Status status = _globalInit(opCtx, configSvrConnStr, generateDistLockProcessId(opCtx));
if (status.isOK()) {
log() << "initialized sharding components";
_setInitializationState(InitializationState::kInitialized);
@@ -398,7 +398,7 @@ void ShardingState::_setInitializationState(InitializationState newState) {
_initializationState.store(static_cast<uint32_t>(newState));
}
-StatusWith<bool> ShardingState::initializeShardingAwarenessIfNeeded(OperationContext* txn) {
+StatusWith<bool> ShardingState::initializeShardingAwarenessIfNeeded(OperationContext* opCtx) {
// In sharded readOnly mode, we ignore the shardIdentity document on disk and instead *require*
// a shardIdentity document to be passed through --overrideShardIdentity.
if (storageGlobalParams.readOnly) {
@@ -413,7 +413,7 @@ StatusWith<bool> ShardingState::initializeShardingAwarenessIfNeeded(OperationCon
if (!swOverrideShardIdentity.isOK()) {
return swOverrideShardIdentity.getStatus();
}
- auto status = initializeFromShardIdentity(txn, swOverrideShardIdentity.getValue());
+ auto status = initializeFromShardIdentity(opCtx, swOverrideShardIdentity.getValue());
if (!status.isOK()) {
return status;
}
@@ -448,12 +448,12 @@ StatusWith<bool> ShardingState::initializeShardingAwarenessIfNeeded(OperationCon
}
// Load the shardIdentity document from disk.
- invariant(!txn->lockState()->isLocked());
+ invariant(!opCtx->lockState()->isLocked());
BSONObj shardIdentityBSON;
bool foundShardIdentity = false;
try {
- AutoGetCollection autoColl(txn, NamespaceString::kConfigCollectionNamespace, MODE_IS);
- foundShardIdentity = Helpers::findOne(txn,
+ AutoGetCollection autoColl(opCtx, NamespaceString::kConfigCollectionNamespace, MODE_IS);
+ foundShardIdentity = Helpers::findOne(opCtx,
autoColl.getCollection(),
BSON("_id" << ShardIdentityType::IdName),
shardIdentityBSON);
@@ -477,7 +477,7 @@ StatusWith<bool> ShardingState::initializeShardingAwarenessIfNeeded(OperationCon
if (!swShardIdentity.isOK()) {
return swShardIdentity.getStatus();
}
- auto status = initializeFromShardIdentity(txn, swShardIdentity.getValue());
+ auto status = initializeFromShardIdentity(opCtx, swShardIdentity.getValue());
if (!status.isOK()) {
return status;
}
@@ -496,8 +496,10 @@ StatusWith<bool> ShardingState::initializeShardingAwarenessIfNeeded(OperationCon
}
StatusWith<ChunkVersion> ShardingState::_refreshMetadata(
- OperationContext* txn, const NamespaceString& nss, const CollectionMetadata* metadataForDiff) {
- invariant(!txn->lockState()->isLocked());
+ OperationContext* opCtx,
+ const NamespaceString& nss,
+ const CollectionMetadata* metadataForDiff) {
+ invariant(!opCtx->lockState()->isLocked());
invariant(enabled());
@@ -533,8 +535,8 @@ StatusWith<ChunkVersion> ShardingState::_refreshMetadata(
<< (metadataForDiff ? metadataForDiff->getCollVersion().toString() : "(empty)");
remoteMetadata = stdx::make_unique<CollectionMetadata>();
- status = MetadataLoader::makeCollectionMetadata(txn,
- grid.catalogClient(txn),
+ status = MetadataLoader::makeCollectionMetadata(opCtx,
+ grid.catalogClient(opCtx),
nss.ns(),
getShardName(),
metadataForDiff,
@@ -550,21 +552,21 @@ StatusWith<ChunkVersion> ShardingState::_refreshMetadata(
}
// Exclusive collection lock needed since we're now changing the metadata
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetCollection autoColl(txn, nss, MODE_IX, MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ AutoGetCollection autoColl(opCtx, nss, MODE_IX, MODE_X);
- auto css = CollectionShardingState::get(txn, nss);
+ auto css = CollectionShardingState::get(opCtx, nss);
if (!status.isOK()) {
invariant(status == ErrorCodes::NamespaceNotFound);
- css->refreshMetadata(txn, nullptr);
+ css->refreshMetadata(opCtx, nullptr);
log() << "MetadataLoader took " << t.millis() << " ms and did not find the namespace";
return ChunkVersion::UNSHARDED();
}
- css->refreshMetadata(txn, std::move(remoteMetadata));
+ css->refreshMetadata(opCtx, std::move(remoteMetadata));
auto metadata = css->getMetadata();
@@ -588,11 +590,11 @@ boost::optional<NamespaceString> ShardingState::getActiveDonateChunkNss() {
return _activeMigrationsRegistry.getActiveDonateChunkNss();
}
-BSONObj ShardingState::getActiveMigrationStatusReport(OperationContext* txn) {
- return _activeMigrationsRegistry.getActiveMigrationStatusReport(txn);
+BSONObj ShardingState::getActiveMigrationStatusReport(OperationContext* opCtx) {
+ return _activeMigrationsRegistry.getActiveMigrationStatusReport(opCtx);
}
-void ShardingState::appendInfo(OperationContext* txn, BSONObjBuilder& builder) {
+void ShardingState::appendInfo(OperationContext* opCtx, BSONObjBuilder& builder) {
const bool isEnabled = enabled();
builder.appendBool("enabled", isEnabled);
if (!isEnabled)
@@ -620,19 +622,19 @@ void ShardingState::appendInfo(OperationContext* txn, BSONObjBuilder& builder) {
versionB.done();
}
-bool ShardingState::needCollectionMetadata(OperationContext* txn, const string& ns) {
+bool ShardingState::needCollectionMetadata(OperationContext* opCtx, const string& ns) {
if (!enabled())
return false;
- Client* client = txn->getClient();
+ Client* client = opCtx->getClient();
// Shard version information received from mongos may either by attached to the Client or
// directly to the OperationContext.
return ShardedConnectionInfo::get(client, false) ||
- OperationShardingState::get(txn).hasShardVersion();
+ OperationShardingState::get(opCtx).hasShardVersion();
}
-Status ShardingState::updateShardIdentityConfigString(OperationContext* txn,
+Status ShardingState::updateShardIdentityConfigString(OperationContext* opCtx,
const std::string& newConnectionString) {
BSONObj updateObj(ShardIdentityType::createConfigServerUpdateObject(newConnectionString));
@@ -643,9 +645,9 @@ Status ShardingState::updateShardIdentityConfigString(OperationContext* txn,
updateReq.setLifecycle(&updateLifecycle);
try {
- AutoGetOrCreateDb autoDb(txn, NamespaceString::kConfigCollectionNamespace.db(), MODE_X);
+ AutoGetOrCreateDb autoDb(opCtx, NamespaceString::kConfigCollectionNamespace.db(), MODE_X);
- auto result = update(txn, autoDb.getDb(), updateReq);
+ auto result = update(opCtx, autoDb.getDb(), updateReq);
if (result.numMatched == 0) {
warning() << "failed to update config string of shard identity document because "
<< "it does not exist. This shard could have been removed from the cluster";
diff --git a/src/mongo/db/s/sharding_state.h b/src/mongo/db/s/sharding_state.h
index 6686dc8deca..96b650ad803 100644
--- a/src/mongo/db/s/sharding_state.h
+++ b/src/mongo/db/s/sharding_state.h
@@ -111,7 +111,7 @@ public:
*/
Status canAcceptShardedCommands() const;
- ConnectionString getConfigServer(OperationContext* txn);
+ ConnectionString getConfigServer(OperationContext* opCtx);
std::string getShardName();
@@ -122,21 +122,21 @@ public:
/**
* Initializes the sharding state of this server from the shard identity document argument.
*/
- Status initializeFromShardIdentity(OperationContext* txn,
+ Status initializeFromShardIdentity(OperationContext* opCtx,
const ShardIdentityType& shardIdentity);
/**
* Shuts down sharding machinery on the shard.
*/
- void shutDown(OperationContext* txn);
+ void shutDown(OperationContext* opCtx);
/**
* Updates the ShardRegistry's stored notion of the config server optime based on the
* ConfigServerMetadata decoration attached to the OperationContext.
*/
- Status updateConfigServerOpTimeFromMetadata(OperationContext* txn);
+ Status updateConfigServerOpTimeFromMetadata(OperationContext* opCtx);
- CollectionShardingState* getNS(const std::string& ns, OperationContext* txn);
+ CollectionShardingState* getNS(const std::string& ns, OperationContext* opCtx);
/**
* Iterates through all known sharded collections and marks them (in memory only) as not sharded
@@ -148,7 +148,7 @@ public:
* Refreshes the local metadata based on whether the expected version is higher than what we
* have cached.
*/
- Status onStaleShardVersion(OperationContext* txn,
+ Status onStaleShardVersion(OperationContext* opCtx,
const NamespaceString& nss,
const ChunkVersion& expectedVersion);
@@ -174,13 +174,13 @@ public:
* @return !OK if something else went wrong during reload
* @return latestShardVersion the version that is now stored for this collection
*/
- Status refreshMetadataNow(OperationContext* txn,
+ Status refreshMetadataNow(OperationContext* opCtx,
const NamespaceString& nss,
ChunkVersion* latestShardVersion);
- void appendInfo(OperationContext* txn, BSONObjBuilder& b);
+ void appendInfo(OperationContext* opCtx, BSONObjBuilder& b);
- bool needCollectionMetadata(OperationContext* txn, const std::string& ns);
+ bool needCollectionMetadata(OperationContext* opCtx, const std::string& ns);
/**
* Updates the config server field of the shardIdentity document with the given connection
@@ -188,7 +188,7 @@ public:
*
* Note: this can return NotMaster error.
*/
- Status updateShardIdentityConfigString(OperationContext* txn,
+ Status updateShardIdentityConfigString(OperationContext* opCtx,
const std::string& newConnectionString);
/**
@@ -229,7 +229,7 @@ public:
*
* Takes an IS lock on the namespace of the active migration, if one is active.
*/
- BSONObj getActiveMigrationStatusReport(OperationContext* txn);
+ BSONObj getActiveMigrationStatusReport(OperationContext* opCtx);
/**
* For testing only. Mock the initialization method used by initializeFromConfigConnString and
@@ -266,7 +266,7 @@ public:
* exception of the duplicate ShardRegistry reload in ShardRegistry::startup() (see
* SERVER-26123). Outgoing networking calls to cluster members can now be made.
*/
- StatusWith<bool> initializeShardingAwarenessIfNeeded(OperationContext* txn);
+ StatusWith<bool> initializeShardingAwarenessIfNeeded(OperationContext* opCtx);
private:
// Map from a namespace into the sharding state for each collection we have
@@ -307,7 +307,7 @@ private:
* The metadataForDiff argument indicates that the specified metadata should be used as a base
* from which to only load the differences. If nullptr is passed, a full reload will be done.
*/
- StatusWith<ChunkVersion> _refreshMetadata(OperationContext* txn,
+ StatusWith<ChunkVersion> _refreshMetadata(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionMetadata* metadataForDiff);
diff --git a/src/mongo/db/s/sharding_state_command.cpp b/src/mongo/db/s/sharding_state_command.cpp
index fe643086dd5..86606ef3598 100644
--- a/src/mongo/db/s/sharding_state_command.cpp
+++ b/src/mongo/db/s/sharding_state_command.cpp
@@ -67,13 +67,13 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
std::string& errmsg,
BSONObjBuilder& result) override {
- ShardingState::get(txn)->appendInfo(txn, result);
+ ShardingState::get(opCtx)->appendInfo(opCtx, result);
return true;
}
diff --git a/src/mongo/db/s/sharding_state_recovery.cpp b/src/mongo/db/s/sharding_state_recovery.cpp
index 7f17b748a90..72ee2b87028 100644
--- a/src/mongo/db/s/sharding_state_recovery.cpp
+++ b/src/mongo/db/s/sharding_state_recovery.cpp
@@ -182,17 +182,17 @@ private:
* it has is to always move the opTime forward for a currently running server. It achieves this by
* serializing the modify calls and reading the current opTime under X-lock on the admin database.
*/
-Status modifyRecoveryDocument(OperationContext* txn,
+Status modifyRecoveryDocument(OperationContext* opCtx,
RecoveryDocument::ChangeType change,
const WriteConcernOptions& writeConcern) {
try {
// Use boost::optional so we can release the locks early
boost::optional<AutoGetOrCreateDb> autoGetOrCreateDb;
- autoGetOrCreateDb.emplace(txn, NamespaceString::kConfigCollectionNamespace.db(), MODE_X);
+ autoGetOrCreateDb.emplace(opCtx, NamespaceString::kConfigCollectionNamespace.db(), MODE_X);
BSONObj updateObj = RecoveryDocument::createChangeObj(
grid.shardRegistry()->getConfigServerConnectionString(),
- ShardingState::get(txn)->getShardName(),
+ ShardingState::get(opCtx)->getShardName(),
grid.configOpTime(),
change);
@@ -205,7 +205,7 @@ Status modifyRecoveryDocument(OperationContext* txn,
UpdateLifecycleImpl updateLifecycle(NamespaceString::kConfigCollectionNamespace);
updateReq.setLifecycle(&updateLifecycle);
- UpdateResult result = update(txn, autoGetOrCreateDb->getDb(), updateReq);
+ UpdateResult result = update(opCtx, autoGetOrCreateDb->getDb(), updateReq);
invariant(result.numDocsModified == 1 || !result.upserted.isEmpty());
invariant(result.numMatched <= 1);
@@ -213,8 +213,8 @@ Status modifyRecoveryDocument(OperationContext* txn,
autoGetOrCreateDb = boost::none;
WriteConcernResult writeConcernResult;
- return waitForWriteConcern(txn,
- repl::ReplClientInfo::forClient(txn->getClient()).getLastOp(),
+ return waitForWriteConcern(opCtx,
+ repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(),
writeConcern,
&writeConcernResult);
} catch (const DBException& ex) {
@@ -224,28 +224,29 @@ Status modifyRecoveryDocument(OperationContext* txn,
} // namespace
-Status ShardingStateRecovery::startMetadataOp(OperationContext* txn) {
+Status ShardingStateRecovery::startMetadataOp(OperationContext* opCtx) {
Status upsertStatus =
- modifyRecoveryDocument(txn, RecoveryDocument::Increment, kMajorityWriteConcern);
+ modifyRecoveryDocument(opCtx, RecoveryDocument::Increment, kMajorityWriteConcern);
if (upsertStatus == ErrorCodes::WriteConcernFailed) {
// Couldn't wait for the replication to complete, but the local write was performed. Clear
// it up fast (without any waiting for journal or replication) and still treat it as
// failure.
- modifyRecoveryDocument(txn, RecoveryDocument::Decrement, WriteConcernOptions());
+ modifyRecoveryDocument(opCtx, RecoveryDocument::Decrement, WriteConcernOptions());
}
return upsertStatus;
}
-void ShardingStateRecovery::endMetadataOp(OperationContext* txn) {
- Status status = modifyRecoveryDocument(txn, RecoveryDocument::Decrement, WriteConcernOptions());
+void ShardingStateRecovery::endMetadataOp(OperationContext* opCtx) {
+ Status status =
+ modifyRecoveryDocument(opCtx, RecoveryDocument::Decrement, WriteConcernOptions());
if (!status.isOK()) {
warning() << "Failed to decrement minOpTimeUpdaters due to " << redact(status);
}
}
-Status ShardingStateRecovery::recover(OperationContext* txn) {
+Status ShardingStateRecovery::recover(OperationContext* opCtx) {
if (serverGlobalParams.clusterRole != ClusterRole::ShardServer) {
return Status::OK();
}
@@ -253,9 +254,9 @@ Status ShardingStateRecovery::recover(OperationContext* txn) {
BSONObj recoveryDocBSON;
try {
- AutoGetCollection autoColl(txn, NamespaceString::kConfigCollectionNamespace, MODE_IS);
+ AutoGetCollection autoColl(opCtx, NamespaceString::kConfigCollectionNamespace, MODE_IS);
if (!Helpers::findOne(
- txn, autoColl.getCollection(), RecoveryDocument::getQuery(), recoveryDocBSON)) {
+ opCtx, autoColl.getCollection(), RecoveryDocument::getQuery(), recoveryDocBSON)) {
return Status::OK();
}
} catch (const DBException& ex) {
@@ -270,7 +271,7 @@ Status ShardingStateRecovery::recover(OperationContext* txn) {
log() << "Sharding state recovery process found document " << redact(recoveryDoc.toBSON());
- ShardingState* const shardingState = ShardingState::get(txn);
+ ShardingState* const shardingState = ShardingState::get(opCtx);
invariant(shardingState->enabled());
if (!recoveryDoc.getMinOpTimeUpdaters()) {
@@ -286,18 +287,18 @@ Status ShardingStateRecovery::recover(OperationContext* txn) {
// Need to fetch the latest uptime from the config server, so do a logging write
Status status =
- grid.catalogClient(txn)->logChange(txn,
- "Sharding minOpTime recovery",
- NamespaceString::kConfigCollectionNamespace.ns(),
- recoveryDocBSON,
- ShardingCatalogClient::kMajorityWriteConcern);
+ grid.catalogClient(opCtx)->logChange(opCtx,
+ "Sharding minOpTime recovery",
+ NamespaceString::kConfigCollectionNamespace.ns(),
+ recoveryDocBSON,
+ ShardingCatalogClient::kMajorityWriteConcern);
if (!status.isOK())
return status;
log() << "Sharding state recovered. New config server opTime is " << grid.configOpTime();
// Finally, clear the recovery document so next time we don't need to recover
- status = modifyRecoveryDocument(txn, RecoveryDocument::Clear, kLocalWriteConcern);
+ status = modifyRecoveryDocument(opCtx, RecoveryDocument::Clear, kLocalWriteConcern);
if (!status.isOK()) {
warning() << "Failed to reset sharding state recovery document due to " << redact(status);
}
diff --git a/src/mongo/db/s/sharding_state_recovery.h b/src/mongo/db/s/sharding_state_recovery.h
index c1b31e351fc..2960be472ba 100644
--- a/src/mongo/db/s/sharding_state_recovery.h
+++ b/src/mongo/db/s/sharding_state_recovery.h
@@ -53,13 +53,13 @@ public:
* server's minOpTime after node failure. It is only safe to commence the operation after this
* method returns an OK status.
*/
- static Status startMetadataOp(OperationContext* txn);
+ static Status startMetadataOp(OperationContext* opCtx);
/**
* Marks the end of a sharding metadata operation, persisting the latest config server opTime at
* the time of the call.
*/
- static void endMetadataOp(OperationContext* txn);
+ static void endMetadataOp(OperationContext* opCtx);
/**
* Recovers the minimal config server opTime that the instance should be using for reading
@@ -71,7 +71,7 @@ public:
* Returns OK if the minOpTime was successfully recovered or failure status otherwise. It is
* unsafe to read and rely on any sharding metadata before this method has returned success.
*/
- static Status recover(OperationContext* txn);
+ static Status recover(OperationContext* opCtx);
};
} // namespace mongo
diff --git a/src/mongo/db/s/sharding_state_test.cpp b/src/mongo/db/s/sharding_state_test.cpp
index dce1326b0b9..aa63085b60a 100644
--- a/src/mongo/db/s/sharding_state_test.cpp
+++ b/src/mongo/db/s/sharding_state_test.cpp
@@ -100,7 +100,7 @@ protected:
// When sharding initialization is triggered, initialize sharding state as a shard server.
serverGlobalParams.clusterRole = ClusterRole::ShardServer;
- _shardingState.setGlobalInitMethodForTest([&](OperationContext* txn,
+ _shardingState.setGlobalInitMethodForTest([&](OperationContext* opCtx,
const ConnectionString& configConnStr,
StringData distLockProcessId) {
auto status = initializeGlobalShardingStateForMongodForTest(configConnStr);
@@ -170,7 +170,7 @@ TEST_F(ShardingStateTest, InitWhilePreviouslyInErrorStateWillStayInErrorState) {
shardIdentity.setClusterId(OID::gen());
shardingState()->setGlobalInitMethodForTest(
- [](OperationContext* txn, const ConnectionString& connStr, StringData distLockProcessId) {
+ [](OperationContext* opCtx, const ConnectionString& connStr, StringData distLockProcessId) {
return Status{ErrorCodes::ShutdownInProgress, "shutting down"};
});
@@ -183,7 +183,7 @@ TEST_F(ShardingStateTest, InitWhilePreviouslyInErrorStateWillStayInErrorState) {
// ShardingState is now in error state, attempting to call it again will still result in error.
shardingState()->setGlobalInitMethodForTest(
- [](OperationContext* txn, const ConnectionString& connStr, StringData distLockProcessId) {
+ [](OperationContext* opCtx, const ConnectionString& connStr, StringData distLockProcessId) {
return Status::OK();
});
@@ -213,7 +213,7 @@ TEST_F(ShardingStateTest, InitializeAgainWithMatchingShardIdentitySucceeds) {
shardIdentity2.setClusterId(clusterID);
shardingState()->setGlobalInitMethodForTest(
- [](OperationContext* txn, const ConnectionString& connStr, StringData distLockProcessId) {
+ [](OperationContext* opCtx, const ConnectionString& connStr, StringData distLockProcessId) {
return Status{ErrorCodes::InternalError, "should not reach here"};
});
@@ -241,7 +241,7 @@ TEST_F(ShardingStateTest, InitializeAgainWithSameReplSetNameSucceeds) {
shardIdentity2.setClusterId(clusterID);
shardingState()->setGlobalInitMethodForTest(
- [](OperationContext* txn, const ConnectionString& connStr, StringData distLockProcessId) {
+ [](OperationContext* opCtx, const ConnectionString& connStr, StringData distLockProcessId) {
return Status{ErrorCodes::InternalError, "should not reach here"};
});
diff --git a/src/mongo/db/s/split_chunk_command.cpp b/src/mongo/db/s/split_chunk_command.cpp
index 09d7f147b27..703143cfd67 100644
--- a/src/mongo/db/s/split_chunk_command.cpp
+++ b/src/mongo/db/s/split_chunk_command.cpp
@@ -65,7 +65,7 @@ namespace {
const ReadPreferenceSetting kPrimaryOnlyReadPreference{ReadPreference::PrimaryOnly};
-bool checkIfSingleDoc(OperationContext* txn,
+bool checkIfSingleDoc(OperationContext* opCtx,
Collection* collection,
const IndexDescriptor* idx,
const ChunkType* chunk) {
@@ -73,7 +73,7 @@ bool checkIfSingleDoc(OperationContext* txn,
BSONObj newmin = Helpers::toKeyFormat(kp.extendRangeBound(chunk->getMin(), false));
BSONObj newmax = Helpers::toKeyFormat(kp.extendRangeBound(chunk->getMax(), true));
- unique_ptr<PlanExecutor> exec(InternalPlanner::indexScan(txn,
+ unique_ptr<PlanExecutor> exec(InternalPlanner::indexScan(opCtx,
collection,
idx,
newmin,
@@ -100,16 +100,16 @@ bool checkIfSingleDoc(OperationContext* txn,
// using the specified splitPoints. Returns false if the metadata's chunks don't match
// the new chunk boundaries exactly.
//
-bool _checkMetadataForSuccess(OperationContext* txn,
+bool _checkMetadataForSuccess(OperationContext* opCtx,
const NamespaceString& nss,
const ChunkRange& chunkRange,
const std::vector<BSONObj>& splitKeys) {
ScopedCollectionMetadata metadataAfterSplit;
{
- AutoGetCollection autoColl(txn, nss, MODE_IS);
+ AutoGetCollection autoColl(opCtx, nss, MODE_IS);
// Get collection metadata
- metadataAfterSplit = CollectionShardingState::get(txn, nss.ns())->getMetadata();
+ metadataAfterSplit = CollectionShardingState::get(opCtx, nss.ns())->getMetadata();
}
auto newChunkBounds(splitKeys);
@@ -167,13 +167,13 @@ public:
return parseNsFullyQualified(dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
std::string& errmsg,
BSONObjBuilder& result) override {
- auto shardingState = ShardingState::get(txn);
+ auto shardingState = ShardingState::get(opCtx);
uassertStatusOK(shardingState->canAcceptShardedCommands());
//
@@ -233,8 +233,8 @@ public:
const string whyMessage(str::stream() << "splitting chunk [" << min << ", " << max
<< ") in "
<< nss.toString());
- auto scopedDistLock = grid.catalogClient(txn)->getDistLockManager()->lock(
- txn, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout);
+ auto scopedDistLock = grid.catalogClient(opCtx)->getDistLockManager()->lock(
+ opCtx, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout);
if (!scopedDistLock.isOK()) {
errmsg = str::stream() << "could not acquire collection lock for " << nss.toString()
<< " to split chunk [" << redact(min) << "," << redact(max)
@@ -245,7 +245,7 @@ public:
// Always check our version remotely
ChunkVersion shardVersion;
- Status refreshStatus = shardingState->refreshMetadataNow(txn, nss, &shardVersion);
+ Status refreshStatus = shardingState->refreshMetadataNow(opCtx, nss, &shardVersion);
if (!refreshStatus.isOK()) {
errmsg = str::stream() << "splitChunk cannot split chunk "
@@ -266,7 +266,7 @@ public:
return false;
}
- const auto& oss = OperationShardingState::get(txn);
+ const auto& oss = OperationShardingState::get(opCtx);
uassert(ErrorCodes::InvalidOptions, "collection version is missing", oss.hasShardVersion());
// Even though the splitChunk command transmits a value in the operation's shardVersion
@@ -286,10 +286,10 @@ public:
ScopedCollectionMetadata collMetadata;
{
- AutoGetCollection autoColl(txn, nss, MODE_IS);
+ AutoGetCollection autoColl(opCtx, nss, MODE_IS);
// Get collection metadata
- collMetadata = CollectionShardingState::get(txn, nss.ns())->getMetadata();
+ collMetadata = CollectionShardingState::get(opCtx, nss.ns())->getMetadata();
}
// With nonzero shard version, we must have metadata
@@ -313,8 +313,8 @@ public:
request.toConfigCommandBSON(ShardingCatalogClient::kMajorityWriteConcern.toBSON());
auto cmdResponseStatus =
- Grid::get(txn)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
+ opCtx,
kPrimaryOnlyReadPreference,
"admin",
configCmdObj,
@@ -325,7 +325,7 @@ public:
//
{
ChunkVersion unusedShardVersion;
- refreshStatus = shardingState->refreshMetadataNow(txn, nss, &unusedShardVersion);
+ refreshStatus = shardingState->refreshMetadataNow(opCtx, nss, &unusedShardVersion);
if (!refreshStatus.isOK()) {
errmsg = str::stream() << "failed to refresh metadata for split chunk ["
@@ -368,7 +368,7 @@ public:
// succeeds, thus the automatic retry fails with a precondition violation, for example.
//
if ((!commandStatus.isOK() || !writeConcernStatus.isOK()) &&
- _checkMetadataForSuccess(txn, nss, chunkRange, splitKeys)) {
+ _checkMetadataForSuccess(opCtx, nss, chunkRange, splitKeys)) {
LOG(1) << "splitChunk [" << redact(min) << "," << redact(max)
<< ") has already been committed.";
@@ -381,7 +381,7 @@ public:
// Select chunk to move out for "top chunk optimization".
KeyPattern shardKeyPattern(collMetadata->getKeyPattern());
- AutoGetCollection autoColl(txn, nss, MODE_IS);
+ AutoGetCollection autoColl(opCtx, nss, MODE_IS);
Collection* const collection = autoColl.getCollection();
if (!collection) {
@@ -393,7 +393,7 @@ public:
// Allow multiKey based on the invariant that shard keys must be single-valued. Therefore,
// any multi-key index prefixed by shard key cannot be multikey over the shard key fields.
IndexDescriptor* idx =
- collection->getIndexCatalog()->findShardKeyPrefixedIndex(txn, keyPatternObj, false);
+ collection->getIndexCatalog()->findShardKeyPrefixedIndex(opCtx, keyPatternObj, false);
if (!idx) {
return true;
}
@@ -407,11 +407,11 @@ public:
frontChunk.setMax(splitKeys.front());
if (shardKeyPattern.globalMax().woCompare(backChunk.getMax()) == 0 &&
- checkIfSingleDoc(txn, collection, idx, &backChunk)) {
+ checkIfSingleDoc(opCtx, collection, idx, &backChunk)) {
result.append("shouldMigrate",
BSON("min" << backChunk.getMin() << "max" << backChunk.getMax()));
} else if (shardKeyPattern.globalMin().woCompare(frontChunk.getMin()) == 0 &&
- checkIfSingleDoc(txn, collection, idx, &frontChunk)) {
+ checkIfSingleDoc(opCtx, collection, idx, &frontChunk)) {
result.append("shouldMigrate",
BSON("min" << frontChunk.getMin() << "max" << frontChunk.getMax()));
}
diff --git a/src/mongo/db/s/split_vector_command.cpp b/src/mongo/db/s/split_vector_command.cpp
index f02a7d68fa3..73a424e5d27 100644
--- a/src/mongo/db/s/split_vector_command.cpp
+++ b/src/mongo/db/s/split_vector_command.cpp
@@ -112,7 +112,7 @@ public:
return parseNsFullyQualified(dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& jsobj,
int options,
@@ -157,7 +157,7 @@ public:
{
// Get the size estimate for this namespace
- AutoGetCollection autoColl(txn, nss, MODE_IS);
+ AutoGetCollection autoColl(opCtx, nss, MODE_IS);
Collection* const collection = autoColl.getCollection();
if (!collection) {
@@ -169,7 +169,7 @@ public:
// Therefore, any multi-key index prefixed by shard key cannot be multikey over
// the shard key fields.
IndexDescriptor* idx =
- collection->getIndexCatalog()->findShardKeyPrefixedIndex(txn, keyPattern, false);
+ collection->getIndexCatalog()->findShardKeyPrefixedIndex(opCtx, keyPattern, false);
if (idx == NULL) {
errmsg = (string) "couldn't find index over splitting key " +
keyPattern.clientReadable().toString();
@@ -186,8 +186,8 @@ public:
max = Helpers::toKeyFormat(kp.extendRangeBound(max, false));
}
- const long long recCount = collection->numRecords(txn);
- const long long dataSize = collection->dataSize(txn);
+ const long long recCount = collection->numRecords(opCtx);
+ const long long dataSize = collection->dataSize(opCtx);
//
// 1.b Now that we have the size estimate, go over the remaining parameters and apply
@@ -260,7 +260,7 @@ public:
long long numChunks = 0;
unique_ptr<PlanExecutor> exec(
- InternalPlanner::indexScan(txn,
+ InternalPlanner::indexScan(opCtx,
collection,
idx,
min,
@@ -336,7 +336,7 @@ public:
log() << "splitVector doing another cycle because of force, keyCount now: "
<< keyCount;
- exec = InternalPlanner::indexScan(txn,
+ exec = InternalPlanner::indexScan(opCtx,
collection,
idx,
min,
diff --git a/src/mongo/db/s/unset_sharding_command.cpp b/src/mongo/db/s/unset_sharding_command.cpp
index 9aa63819135..7155b35bce8 100644
--- a/src/mongo/db/s/unset_sharding_command.cpp
+++ b/src/mongo/db/s/unset_sharding_command.cpp
@@ -72,13 +72,13 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
std::string& errmsg,
BSONObjBuilder& result) override {
- ShardedConnectionInfo::reset(txn->getClient());
+ ShardedConnectionInfo::reset(opCtx->getClient());
return true;
}
diff --git a/src/mongo/db/server_parameters.h b/src/mongo/db/server_parameters.h
index be061848df8..c5ff2e7ae46 100644
--- a/src/mongo/db/server_parameters.h
+++ b/src/mongo/db/server_parameters.h
@@ -81,7 +81,7 @@ public:
}
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) = 0;
+ virtual void append(OperationContext* opCtx, BSONObjBuilder& b, const std::string& name) = 0;
virtual Status set(const BSONElement& newValueElement) = 0;
@@ -218,7 +218,7 @@ public:
_value(value) {}
virtual ~ExportedServerParameter() {}
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name);
+ virtual void append(OperationContext* opCtx, BSONObjBuilder& b, const std::string& name);
virtual Status set(const BSONElement& newValueElement);
virtual Status set(const T& newValue);
diff --git a/src/mongo/db/server_parameters_inline.h b/src/mongo/db/server_parameters_inline.h
index 4a4c5e6c88f..21f862cf105 100644
--- a/src/mongo/db/server_parameters_inline.h
+++ b/src/mongo/db/server_parameters_inline.h
@@ -38,22 +38,22 @@ namespace mongo {
// for a value type is chosen by the server_parameter_storage_type type trait. Since there is no
// support for partial template specialization of member functions, we generate 4 (the Atomic types)
// x 2 (RuntimeOnly, StartupAndRuntime) implementations of append and set.
-#define EXPORTED_ATOMIC_SERVER_PARAMETER_TYPE(VALUE_TYPE, PARAM_TYPE) \
- template <> \
- inline void ExportedServerParameter<VALUE_TYPE, PARAM_TYPE>::append( \
- OperationContext* txn, BSONObjBuilder& b, const std::string& name) { \
- b.append(name, _value->load()); \
- } \
- \
- template <> \
- inline Status ExportedServerParameter<VALUE_TYPE, PARAM_TYPE>::set( \
- const VALUE_TYPE& newValue) { \
- Status v = validate(newValue); \
- if (!v.isOK()) \
- return v; \
- \
- _value->store(newValue); \
- return Status::OK(); \
+#define EXPORTED_ATOMIC_SERVER_PARAMETER_TYPE(VALUE_TYPE, PARAM_TYPE) \
+ template <> \
+ inline void ExportedServerParameter<VALUE_TYPE, PARAM_TYPE>::append( \
+ OperationContext* opCtx, BSONObjBuilder& b, const std::string& name) { \
+ b.append(name, _value->load()); \
+ } \
+ \
+ template <> \
+ inline Status ExportedServerParameter<VALUE_TYPE, PARAM_TYPE>::set( \
+ const VALUE_TYPE& newValue) { \
+ Status v = validate(newValue); \
+ if (!v.isOK()) \
+ return v; \
+ \
+ _value->store(newValue); \
+ return Status::OK(); \
}
#define EXPORTED_ATOMIC_SERVER_PARAMETER(PARAM_TYPE) \
@@ -86,7 +86,7 @@ inline Status ExportedServerParameter<T, paramType>::set(const T& newValue) {
}
template <typename T, ServerParameterType paramType>
-void ExportedServerParameter<T, paramType>::append(OperationContext* txn,
+void ExportedServerParameter<T, paramType>::append(OperationContext* opCtx,
BSONObjBuilder& b,
const std::string& name) {
b.append(name, *_value);
diff --git a/src/mongo/db/server_parameters_test.cpp b/src/mongo/db/server_parameters_test.cpp
index 3b4783bcbbd..8ce7c9fe141 100644
--- a/src/mongo/db/server_parameters_test.cpp
+++ b/src/mongo/db/server_parameters_test.cpp
@@ -72,8 +72,8 @@ TEST(ServerParameters, Vector1) {
BSONObjBuilder b;
- OperationContextNoop txn;
- vv.append(&txn, b, vv.name());
+ OperationContextNoop opCtx;
+ vv.append(&opCtx, b, vv.name());
BSONObj y = b.obj();
ASSERT(x.firstElement().woCompare(y.firstElement(), false) == 0);
diff --git a/src/mongo/db/service_context.cpp b/src/mongo/db/service_context.cpp
index 09af7608dd0..1509fca8367 100644
--- a/src/mongo/db/service_context.cpp
+++ b/src/mongo/db/service_context.cpp
@@ -315,7 +315,7 @@ void ServiceContext::killOperation(OperationContext* opCtx, ErrorCodes::Error ki
}
}
-void ServiceContext::killAllUserOperations(const OperationContext* txn,
+void ServiceContext::killAllUserOperations(const OperationContext* opCtx,
ErrorCodes::Error killCode) {
for (LockedClientsCursor cursor(this); Client* client = cursor.next();) {
if (!client->isFromUserConnection()) {
@@ -327,7 +327,7 @@ void ServiceContext::killAllUserOperations(const OperationContext* txn,
OperationContext* toKill = client->getOperationContext();
// Don't kill ourself.
- if (toKill && toKill->getOpID() != txn->getOpID()) {
+ if (toKill && toKill->getOpID() != opCtx->getOpID()) {
killOperation(toKill, killCode);
}
}
diff --git a/src/mongo/db/service_context.h b/src/mongo/db/service_context.h
index c1895bf4065..ad5af47e360 100644
--- a/src/mongo/db/service_context.h
+++ b/src/mongo/db/service_context.h
@@ -286,17 +286,19 @@ public:
}
/**
- * Kills the operation "txn" with the code "killCode", if txn has not already been killed.
- * Caller must own the lock on txn->getClient, and txn->getServiceContext() must be the same as
+ * Kills the operation "opCtx" with the code "killCode", if opCtx has not already been killed.
+ * Caller must own the lock on opCtx->getClient, and opCtx->getServiceContext() must be the
+ * same as this service context.
* this service context.
**/
- void killOperation(OperationContext* txn, ErrorCodes::Error killCode = ErrorCodes::Interrupted);
+ void killOperation(OperationContext* opCtx,
+ ErrorCodes::Error killCode = ErrorCodes::Interrupted);
/**
* Kills all operations that have a Client that is associated with an incoming user
- * connection, except for the one associated with txn.
+ * connection, except for the one associated with opCtx.
*/
- void killAllUserOperations(const OperationContext* txn, ErrorCodes::Error killCode);
+ void killAllUserOperations(const OperationContext* opCtx, ErrorCodes::Error killCode);
/**
* Registers a listener to be notified each time an op is killed.
diff --git a/src/mongo/db/service_context_d_test_fixture.cpp b/src/mongo/db/service_context_d_test_fixture.cpp
index 238f0936a0a..194011842ee 100644
--- a/src/mongo/db/service_context_d_test_fixture.cpp
+++ b/src/mongo/db/service_context_d_test_fixture.cpp
@@ -76,34 +76,34 @@ void ServiceContextMongoDTest::setUp() {
void ServiceContextMongoDTest::tearDown() {
ON_BLOCK_EXIT([&] { Client::destroy(); });
- auto txn = cc().makeOperationContext();
- _dropAllDBs(txn.get());
+ auto opCtx = cc().makeOperationContext();
+ _dropAllDBs(opCtx.get());
}
ServiceContext* ServiceContextMongoDTest::getServiceContext() {
return getGlobalServiceContext();
}
-void ServiceContextMongoDTest::_dropAllDBs(OperationContext* txn) {
- dropAllDatabasesExceptLocal(txn);
+void ServiceContextMongoDTest::_dropAllDBs(OperationContext* opCtx) {
+ dropAllDatabasesExceptLocal(opCtx);
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- AutoGetDb autoDBLocal(txn, "local", MODE_X);
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
+ AutoGetDb autoDBLocal(opCtx, "local", MODE_X);
const auto localDB = autoDBLocal.getDb();
if (localDB) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
// Do not wrap in a WriteUnitOfWork until SERVER-17103 is addressed.
- autoDBLocal.getDb()->dropDatabase(txn, localDB);
+ autoDBLocal.getDb()->dropDatabase(opCtx, localDB);
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "_dropAllDBs", "local");
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "_dropAllDBs", "local");
}
// dropAllDatabasesExceptLocal() does not close empty databases. However the holder still
// allocates resources to track these empty databases. These resources not released by
// dropAllDatabasesExceptLocal() will be leaked at exit unless we call DatabaseHolder::closeAll.
BSONObjBuilder unused;
- invariant(dbHolder().closeAll(txn, unused, false));
+ invariant(dbHolder().closeAll(opCtx, unused, false));
}
} // namespace mongo
diff --git a/src/mongo/db/service_context_d_test_fixture.h b/src/mongo/db/service_context_d_test_fixture.h
index 59962d84285..2442a10132a 100644
--- a/src/mongo/db/service_context_d_test_fixture.h
+++ b/src/mongo/db/service_context_d_test_fixture.h
@@ -61,7 +61,7 @@ private:
* Drops all databases. Call this before global ReplicationCoordinator is destroyed -- it is
* used to drop the databases.
*/
- void _dropAllDBs(OperationContext* txn);
+ void _dropAllDBs(OperationContext* opCtx);
};
} // namespace mongo
diff --git a/src/mongo/db/stats/latency_server_status_section.cpp b/src/mongo/db/stats/latency_server_status_section.cpp
index 41bccf2071f..a7087586561 100644
--- a/src/mongo/db/stats/latency_server_status_section.cpp
+++ b/src/mongo/db/stats/latency_server_status_section.cpp
@@ -46,13 +46,13 @@ public:
return true;
}
- BSONObj generateSection(OperationContext* txn, const BSONElement& configElem) const {
+ BSONObj generateSection(OperationContext* opCtx, const BSONElement& configElem) const {
BSONObjBuilder latencyBuilder;
bool includeHistograms = false;
if (configElem.type() == BSONType::Object) {
includeHistograms = configElem.Obj()["histograms"].trueValue();
}
- Top::get(txn->getServiceContext())
+ Top::get(opCtx->getServiceContext())
.appendGlobalLatencyStats(includeHistograms, &latencyBuilder);
return latencyBuilder.obj();
}
diff --git a/src/mongo/db/stats/lock_server_status_section.cpp b/src/mongo/db/stats/lock_server_status_section.cpp
index c6cde40d0ad..7884259ac4f 100644
--- a/src/mongo/db/stats/lock_server_status_section.cpp
+++ b/src/mongo/db/stats/lock_server_status_section.cpp
@@ -49,11 +49,12 @@ public:
return true;
}
- virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ virtual BSONObj generateSection(OperationContext* opCtx,
+ const BSONElement& configElement) const {
std::valarray<int> clientStatusCounts(5);
// This returns the blocked lock states
- for (ServiceContext::LockedClientsCursor cursor(txn->getClient()->getServiceContext());
+ for (ServiceContext::LockedClientsCursor cursor(opCtx->getClient()->getServiceContext());
Client* client = cursor.next();) {
invariant(client);
stdx::unique_lock<Client> uniqueLock(*client);
@@ -108,7 +109,8 @@ public:
return true;
}
- virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ virtual BSONObj generateSection(OperationContext* opCtx,
+ const BSONElement& configElement) const {
BSONObjBuilder ret;
SingleThreadedLockStats stats;
diff --git a/src/mongo/db/stats/range_deleter_server_status.cpp b/src/mongo/db/stats/range_deleter_server_status.cpp
index 817ffa444e4..1c62a30fef5 100644
--- a/src/mongo/db/stats/range_deleter_server_status.cpp
+++ b/src/mongo/db/stats/range_deleter_server_status.cpp
@@ -58,7 +58,7 @@ public:
return false;
}
- BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObj generateSection(OperationContext* opCtx, const BSONElement& configElement) const {
RangeDeleter* deleter = getDeleter();
if (!deleter) {
return BSONObj();
diff --git a/src/mongo/db/stats/snapshots_webplugins.cpp b/src/mongo/db/stats/snapshots_webplugins.cpp
index 5d14482e973..c45d827ebf3 100644
--- a/src/mongo/db/stats/snapshots_webplugins.cpp
+++ b/src/mongo/db/stats/snapshots_webplugins.cpp
@@ -87,7 +87,7 @@ public:
ss << "</tr>\n";
}
- void run(OperationContext* txn, stringstream& ss) {
+ void run(OperationContext* opCtx, stringstream& ss) {
StatusWith<SnapshotDiff> diff = statsSnapshots.computeDelta();
if (!diff.isOK())
diff --git a/src/mongo/db/stats/storage_stats.cpp b/src/mongo/db/stats/storage_stats.cpp
index f741f7fe482..8a9a343f31a 100644
--- a/src/mongo/db/stats/storage_stats.cpp
+++ b/src/mongo/db/stats/storage_stats.cpp
@@ -38,7 +38,7 @@
namespace mongo {
-Status appendCollectionStorageStats(OperationContext* txn,
+Status appendCollectionStorageStats(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& param,
BSONObjBuilder* result) {
@@ -54,7 +54,7 @@ Status appendCollectionStorageStats(OperationContext* txn,
bool verbose = param["verbose"].trueValue();
- AutoGetCollectionForRead ctx(txn, nss);
+ AutoGetCollectionForRead ctx(opCtx, nss);
if (!ctx.getDb()) {
return {ErrorCodes::BadValue,
str::stream() << "Database [" << nss.db().toString() << "] not found."};
@@ -66,34 +66,34 @@ Status appendCollectionStorageStats(OperationContext* txn,
str::stream() << "Collection [" << nss.toString() << "] not found."};
}
- long long size = collection->dataSize(txn) / scale;
+ long long size = collection->dataSize(opCtx) / scale;
result->appendNumber("size", size);
- long long numRecords = collection->numRecords(txn);
+ long long numRecords = collection->numRecords(opCtx);
result->appendNumber("count", numRecords);
if (numRecords)
- result->append("avgObjSize", collection->averageObjectSize(txn));
+ result->append("avgObjSize", collection->averageObjectSize(opCtx));
RecordStore* recordStore = collection->getRecordStore();
result->appendNumber(
"storageSize",
- static_cast<long long>(recordStore->storageSize(txn, result, verbose ? 1 : 0)) / scale);
+ static_cast<long long>(recordStore->storageSize(opCtx, result, verbose ? 1 : 0)) / scale);
- recordStore->appendCustomStats(txn, result, scale);
+ recordStore->appendCustomStats(opCtx, result, scale);
IndexCatalog* indexCatalog = collection->getIndexCatalog();
- result->append("nindexes", indexCatalog->numIndexesReady(txn));
+ result->append("nindexes", indexCatalog->numIndexesReady(opCtx));
BSONObjBuilder indexDetails;
- IndexCatalog::IndexIterator i = indexCatalog->getIndexIterator(txn, false);
+ IndexCatalog::IndexIterator i = indexCatalog->getIndexIterator(opCtx, false);
while (i.more()) {
const IndexDescriptor* descriptor = i.next();
IndexAccessMethod* iam = indexCatalog->getIndex(descriptor);
invariant(iam);
BSONObjBuilder bob;
- if (iam->appendCustomStats(txn, &bob, scale)) {
+ if (iam->appendCustomStats(opCtx, &bob, scale)) {
indexDetails.append(descriptor->indexName(), bob.obj());
}
}
@@ -101,7 +101,7 @@ Status appendCollectionStorageStats(OperationContext* txn,
result->append("indexDetails", indexDetails.obj());
BSONObjBuilder indexSizes;
- long long indexSize = collection->getIndexSize(txn, &indexSizes, scale);
+ long long indexSize = collection->getIndexSize(opCtx, &indexSizes, scale);
result->appendNumber("totalIndexSize", indexSize / scale);
result->append("indexSizes", indexSizes.obj());
diff --git a/src/mongo/db/stats/storage_stats.h b/src/mongo/db/stats/storage_stats.h
index 531fc3c4688..0f62229c80a 100644
--- a/src/mongo/db/stats/storage_stats.h
+++ b/src/mongo/db/stats/storage_stats.h
@@ -40,7 +40,7 @@ namespace mongo {
* Appends to 'builder' storage related statistics for the collection represented by 'nss'.
* Used by both the collStats command and the $collStats aggregation stage.
*/
-Status appendCollectionStorageStats(OperationContext* txn,
+Status appendCollectionStorageStats(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& param,
BSONObjBuilder* builder);
diff --git a/src/mongo/db/stats/top.cpp b/src/mongo/db/stats/top.cpp
index 413c5f1c015..8a8358030b8 100644
--- a/src/mongo/db/stats/top.cpp
+++ b/src/mongo/db/stats/top.cpp
@@ -72,7 +72,7 @@ Top& Top::get(ServiceContext* service) {
return getTop(service);
}
-void Top::record(OperationContext* txn,
+void Top::record(OperationContext* opCtx,
StringData ns,
LogicalOp logicalOp,
int lockType,
@@ -91,17 +91,17 @@ void Top::record(OperationContext* txn,
}
CollectionData& coll = _usage[hashedNs];
- _record(txn, coll, logicalOp, lockType, micros, readWriteType);
+ _record(opCtx, coll, logicalOp, lockType, micros, readWriteType);
}
-void Top::_record(OperationContext* txn,
+void Top::_record(OperationContext* opCtx,
CollectionData& c,
LogicalOp logicalOp,
int lockType,
long long micros,
Command::ReadWriteType readWriteType) {
- _incrementHistogram(txn, micros, &c.opLatencyHistogram, readWriteType);
+ _incrementHistogram(opCtx, micros, &c.opLatencyHistogram, readWriteType);
c.total.inc(micros);
@@ -206,11 +206,11 @@ void Top::appendLatencyStats(StringData ns, bool includeHistograms, BSONObjBuild
builder->append("latencyStats", latencyStatsBuilder.obj());
}
-void Top::incrementGlobalLatencyStats(OperationContext* txn,
+void Top::incrementGlobalLatencyStats(OperationContext* opCtx,
uint64_t latency,
Command::ReadWriteType readWriteType) {
stdx::lock_guard<SimpleMutex> guard(_lock);
- _incrementHistogram(txn, latency, &_globalHistogramStats, readWriteType);
+ _incrementHistogram(opCtx, latency, &_globalHistogramStats, readWriteType);
}
void Top::appendGlobalLatencyStats(bool includeHistograms, BSONObjBuilder* builder) {
@@ -218,12 +218,12 @@ void Top::appendGlobalLatencyStats(bool includeHistograms, BSONObjBuilder* build
_globalHistogramStats.append(includeHistograms, builder);
}
-void Top::_incrementHistogram(OperationContext* txn,
+void Top::_incrementHistogram(OperationContext* opCtx,
long long latency,
OperationLatencyHistogram* histogram,
Command::ReadWriteType readWriteType) {
// Only update histogram if operation came from a user.
- Client* client = txn->getClient();
+ Client* client = opCtx->getClient();
if (client->isFromUserConnection() && !client->isInDirectClient()) {
histogram->increment(latency, readWriteType);
}
diff --git a/src/mongo/db/stats/top.h b/src/mongo/db/stats/top.h
index 903b8e5c510..1f08e4784a5 100644
--- a/src/mongo/db/stats/top.h
+++ b/src/mongo/db/stats/top.h
@@ -87,7 +87,7 @@ public:
typedef StringMap<CollectionData> UsageMap;
public:
- void record(OperationContext* txn,
+ void record(OperationContext* opCtx,
StringData ns,
LogicalOp logicalOp,
int lockType,
@@ -109,7 +109,7 @@ public:
/**
* Increments the global histogram.
*/
- void incrementGlobalLatencyStats(OperationContext* txn,
+ void incrementGlobalLatencyStats(OperationContext* opCtx,
uint64_t latency,
Command::ReadWriteType readWriteType);
@@ -123,14 +123,14 @@ private:
void _appendStatsEntry(BSONObjBuilder& b, const char* statsName, const UsageData& map) const;
- void _record(OperationContext* txn,
+ void _record(OperationContext* opCtx,
CollectionData& c,
LogicalOp logicalOp,
int lockType,
long long micros,
Command::ReadWriteType readWriteType);
- void _incrementHistogram(OperationContext* txn,
+ void _incrementHistogram(OperationContext* opCtx,
long long latency,
OperationLatencyHistogram* histogram,
Command::ReadWriteType readWriteType);
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.cpp b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
index 7837e898b56..f7c3eb3d2e0 100644
--- a/src/mongo/db/storage/bson_collection_catalog_entry.cpp
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
@@ -102,19 +102,19 @@ void parseMultikeyPathsFromBytes(BSONObj multikeyPathsObj, MultikeyPaths* multik
BSONCollectionCatalogEntry::BSONCollectionCatalogEntry(StringData ns)
: CollectionCatalogEntry(ns) {}
-CollectionOptions BSONCollectionCatalogEntry::getCollectionOptions(OperationContext* txn) const {
- MetaData md = _getMetaData(txn);
+CollectionOptions BSONCollectionCatalogEntry::getCollectionOptions(OperationContext* opCtx) const {
+ MetaData md = _getMetaData(opCtx);
return md.options;
}
-int BSONCollectionCatalogEntry::getTotalIndexCount(OperationContext* txn) const {
- MetaData md = _getMetaData(txn);
+int BSONCollectionCatalogEntry::getTotalIndexCount(OperationContext* opCtx) const {
+ MetaData md = _getMetaData(opCtx);
return static_cast<int>(md.indexes.size());
}
-int BSONCollectionCatalogEntry::getCompletedIndexCount(OperationContext* txn) const {
- MetaData md = _getMetaData(txn);
+int BSONCollectionCatalogEntry::getCompletedIndexCount(OperationContext* opCtx) const {
+ MetaData md = _getMetaData(opCtx);
int num = 0;
for (unsigned i = 0; i < md.indexes.size(); i++) {
@@ -124,9 +124,9 @@ int BSONCollectionCatalogEntry::getCompletedIndexCount(OperationContext* txn) co
return num;
}
-BSONObj BSONCollectionCatalogEntry::getIndexSpec(OperationContext* txn,
+BSONObj BSONCollectionCatalogEntry::getIndexSpec(OperationContext* opCtx,
StringData indexName) const {
- MetaData md = _getMetaData(txn);
+ MetaData md = _getMetaData(opCtx);
int offset = md.findIndexOffset(indexName);
invariant(offset >= 0);
@@ -134,19 +134,19 @@ BSONObj BSONCollectionCatalogEntry::getIndexSpec(OperationContext* txn,
}
-void BSONCollectionCatalogEntry::getAllIndexes(OperationContext* txn,
+void BSONCollectionCatalogEntry::getAllIndexes(OperationContext* opCtx,
std::vector<std::string>* names) const {
- MetaData md = _getMetaData(txn);
+ MetaData md = _getMetaData(opCtx);
for (unsigned i = 0; i < md.indexes.size(); i++) {
names->push_back(md.indexes[i].spec["name"].String());
}
}
-bool BSONCollectionCatalogEntry::isIndexMultikey(OperationContext* txn,
+bool BSONCollectionCatalogEntry::isIndexMultikey(OperationContext* opCtx,
StringData indexName,
MultikeyPaths* multikeyPaths) const {
- MetaData md = _getMetaData(txn);
+ MetaData md = _getMetaData(opCtx);
int offset = md.findIndexOffset(indexName);
invariant(offset >= 0);
@@ -158,17 +158,17 @@ bool BSONCollectionCatalogEntry::isIndexMultikey(OperationContext* txn,
return md.indexes[offset].multikey;
}
-RecordId BSONCollectionCatalogEntry::getIndexHead(OperationContext* txn,
+RecordId BSONCollectionCatalogEntry::getIndexHead(OperationContext* opCtx,
StringData indexName) const {
- MetaData md = _getMetaData(txn);
+ MetaData md = _getMetaData(opCtx);
int offset = md.findIndexOffset(indexName);
invariant(offset >= 0);
return md.indexes[offset].head;
}
-bool BSONCollectionCatalogEntry::isIndexReady(OperationContext* txn, StringData indexName) const {
- MetaData md = _getMetaData(txn);
+bool BSONCollectionCatalogEntry::isIndexReady(OperationContext* opCtx, StringData indexName) const {
+ MetaData md = _getMetaData(opCtx);
int offset = md.findIndexOffset(indexName);
invariant(offset >= 0);
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.h b/src/mongo/db/storage/bson_collection_catalog_entry.h
index 83c2238fc17..f2dbd891dd7 100644
--- a/src/mongo/db/storage/bson_collection_catalog_entry.h
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.h
@@ -48,23 +48,23 @@ public:
virtual ~BSONCollectionCatalogEntry() {}
- virtual CollectionOptions getCollectionOptions(OperationContext* txn) const;
+ virtual CollectionOptions getCollectionOptions(OperationContext* opCtx) const;
- virtual int getTotalIndexCount(OperationContext* txn) const;
+ virtual int getTotalIndexCount(OperationContext* opCtx) const;
- virtual int getCompletedIndexCount(OperationContext* txn) const;
+ virtual int getCompletedIndexCount(OperationContext* opCtx) const;
- virtual BSONObj getIndexSpec(OperationContext* txn, StringData idxName) const;
+ virtual BSONObj getIndexSpec(OperationContext* opCtx, StringData idxName) const;
- virtual void getAllIndexes(OperationContext* txn, std::vector<std::string>* names) const;
+ virtual void getAllIndexes(OperationContext* opCtx, std::vector<std::string>* names) const;
- virtual bool isIndexMultikey(OperationContext* txn,
+ virtual bool isIndexMultikey(OperationContext* opCtx,
StringData indexName,
MultikeyPaths* multikeyPaths) const;
- virtual RecordId getIndexHead(OperationContext* txn, StringData indexName) const;
+ virtual RecordId getIndexHead(OperationContext* opCtx, StringData indexName) const;
- virtual bool isIndexReady(OperationContext* txn, StringData indexName) const;
+ virtual bool isIndexReady(OperationContext* opCtx, StringData indexName) const;
// ------ for implementors
@@ -111,6 +111,6 @@ public:
};
protected:
- virtual MetaData _getMetaData(OperationContext* txn) const = 0;
+ virtual MetaData _getMetaData(OperationContext* opCtx) const = 0;
};
}
diff --git a/src/mongo/db/storage/capped_callback.h b/src/mongo/db/storage/capped_callback.h
index 21d5c0bad5c..dab688ba711 100644
--- a/src/mongo/db/storage/capped_callback.h
+++ b/src/mongo/db/storage/capped_callback.h
@@ -50,7 +50,7 @@ public:
* If data is unowned, it is only valid inside of this call. If implementations wish to
* stash a pointer, they must copy it.
*/
- virtual Status aboutToDeleteCapped(OperationContext* txn,
+ virtual Status aboutToDeleteCapped(OperationContext* opCtx,
const RecordId& loc,
RecordData data) = 0;
diff --git a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
index 6210971ae84..8d22cdf0224 100644
--- a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
+++ b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
@@ -51,7 +51,7 @@ public:
return true;
}
void detachFromOperationContext() final {}
- void reattachToOperationContext(OperationContext* txn) final {}
+ void reattachToOperationContext(OperationContext* opCtx) final {}
};
class DevNullRecordStore : public RecordStore {
@@ -68,11 +68,11 @@ public:
virtual void setCappedCallback(CappedCallback*) {}
- virtual long long dataSize(OperationContext* txn) const {
+ virtual long long dataSize(OperationContext* opCtx) const {
return 0;
}
- virtual long long numRecords(OperationContext* txn) const {
+ virtual long long numRecords(OperationContext* opCtx) const {
return 0;
}
@@ -80,23 +80,23 @@ public:
return _options.capped;
}
- virtual int64_t storageSize(OperationContext* txn,
+ virtual int64_t storageSize(OperationContext* opCtx,
BSONObjBuilder* extraInfo = NULL,
int infoLevel = 0) const {
return 0;
}
- virtual RecordData dataFor(OperationContext* txn, const RecordId& loc) const {
+ virtual RecordData dataFor(OperationContext* opCtx, const RecordId& loc) const {
return RecordData(_dummy.objdata(), _dummy.objsize());
}
- virtual bool findRecord(OperationContext* txn, const RecordId& loc, RecordData* rd) const {
+ virtual bool findRecord(OperationContext* opCtx, const RecordId& loc, RecordData* rd) const {
return false;
}
- virtual void deleteRecord(OperationContext* txn, const RecordId& dl) {}
+ virtual void deleteRecord(OperationContext* opCtx, const RecordId& dl) {}
- virtual StatusWith<RecordId> insertRecord(OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord(OperationContext* opCtx,
const char* data,
int len,
bool enforceQuota) {
@@ -104,7 +104,7 @@ public:
return StatusWith<RecordId>(RecordId(6, 4));
}
- virtual Status insertRecordsWithDocWriter(OperationContext* txn,
+ virtual Status insertRecordsWithDocWriter(OperationContext* opCtx,
const DocWriter* const* docs,
size_t nDocs,
RecordId* idsOut) {
@@ -117,7 +117,7 @@ public:
return Status::OK();
}
- virtual Status updateRecord(OperationContext* txn,
+ virtual Status updateRecord(OperationContext* opCtx,
const RecordId& oldLocation,
const char* data,
int len,
@@ -130,7 +130,7 @@ public:
return false;
}
- virtual StatusWith<RecordData> updateWithDamages(OperationContext* txn,
+ virtual StatusWith<RecordData> updateWithDamages(OperationContext* opCtx,
const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
@@ -139,18 +139,18 @@ public:
}
- std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* txn,
+ std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* opCtx,
bool forward) const final {
return stdx::make_unique<EmptyRecordCursor>();
}
- virtual Status truncate(OperationContext* txn) {
+ virtual Status truncate(OperationContext* opCtx) {
return Status::OK();
}
- virtual void cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive) {}
+ virtual void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) {}
- virtual Status validate(OperationContext* txn,
+ virtual Status validate(OperationContext* opCtx,
ValidateCmdLevel level,
ValidateAdaptor* adaptor,
ValidateResults* results,
@@ -158,19 +158,19 @@ public:
return Status::OK();
}
- virtual void appendCustomStats(OperationContext* txn,
+ virtual void appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* result,
double scale) const {
result->appendNumber("numInserts", _numInserts);
}
- virtual Status touch(OperationContext* txn, BSONObjBuilder* output) const {
+ virtual Status touch(OperationContext* opCtx, BSONObjBuilder* output) const {
return Status::OK();
}
- void waitForAllEarlierOplogWritesToBeVisible(OperationContext* txn) const override {}
+ void waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx) const override {}
- virtual void updateStatsAfterRepair(OperationContext* txn,
+ virtual void updateStatsAfterRepair(OperationContext* opCtx,
long long numRecords,
long long dataSize) {}
@@ -195,50 +195,50 @@ class DevNullSortedDataInterface : public SortedDataInterface {
public:
virtual ~DevNullSortedDataInterface() {}
- virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn, bool dupsAllowed) {
+ virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* opCtx, bool dupsAllowed) {
return new DevNullSortedDataBuilderInterface();
}
- virtual Status insert(OperationContext* txn,
+ virtual Status insert(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
bool dupsAllowed) {
return Status::OK();
}
- virtual void unindex(OperationContext* txn,
+ virtual void unindex(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
bool dupsAllowed) {}
- virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& loc) {
+ virtual Status dupKeyCheck(OperationContext* opCtx, const BSONObj& key, const RecordId& loc) {
return Status::OK();
}
- virtual void fullValidate(OperationContext* txn,
+ virtual void fullValidate(OperationContext* opCtx,
long long* numKeysOut,
ValidateResults* fullResults) const {}
- virtual bool appendCustomStats(OperationContext* txn,
+ virtual bool appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* output,
double scale) const {
return false;
}
- virtual long long getSpaceUsedBytes(OperationContext* txn) const {
+ virtual long long getSpaceUsedBytes(OperationContext* opCtx) const {
return 0;
}
- virtual bool isEmpty(OperationContext* txn) {
+ virtual bool isEmpty(OperationContext* opCtx) {
return true;
}
- virtual std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* txn,
+ virtual std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* opCtx,
bool isForward) const {
return {};
}
- virtual Status initAsEmpty(OperationContext* txn) {
+ virtual Status initAsEmpty(OperationContext* opCtx) {
return Status::OK();
}
};
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp
index a036b05c44e..4488c730431 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp
@@ -142,11 +142,11 @@ public:
_currentKeySize = 0;
}
- virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn, bool dupsAllowed) {
+ virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* opCtx, bool dupsAllowed) {
return new EphemeralForTestBtreeBuilderImpl(_data, &_currentKeySize, dupsAllowed);
}
- virtual Status insert(OperationContext* txn,
+ virtual Status insert(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
bool dupsAllowed) {
@@ -167,12 +167,12 @@ public:
IndexKeyEntry entry(key.getOwned(), loc);
if (_data->insert(entry).second) {
_currentKeySize += key.objsize();
- txn->recoveryUnit()->registerChange(new IndexChange(_data, entry, true));
+ opCtx->recoveryUnit()->registerChange(new IndexChange(_data, entry, true));
}
return Status::OK();
}
- virtual void unindex(OperationContext* txn,
+ virtual void unindex(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
bool dupsAllowed) {
@@ -184,47 +184,51 @@ public:
invariant(numDeleted <= 1);
if (numDeleted == 1) {
_currentKeySize -= key.objsize();
- txn->recoveryUnit()->registerChange(new IndexChange(_data, entry, false));
+ opCtx->recoveryUnit()->registerChange(new IndexChange(_data, entry, false));
}
}
- virtual void fullValidate(OperationContext* txn,
+ virtual void fullValidate(OperationContext* opCtx,
long long* numKeysOut,
ValidateResults* fullResults) const {
// TODO check invariants?
*numKeysOut = _data->size();
}
- virtual bool appendCustomStats(OperationContext* txn,
+ virtual bool appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* output,
double scale) const {
return false;
}
- virtual long long getSpaceUsedBytes(OperationContext* txn) const {
+ virtual long long getSpaceUsedBytes(OperationContext* opCtx) const {
return _currentKeySize + (sizeof(IndexKeyEntry) * _data->size());
}
- virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& loc) {
+ virtual Status dupKeyCheck(OperationContext* opCtx, const BSONObj& key, const RecordId& loc) {
invariant(!hasFieldNames(key));
if (isDup(*_data, key, loc))
return dupKeyError(key);
return Status::OK();
}
- virtual bool isEmpty(OperationContext* txn) {
+ virtual bool isEmpty(OperationContext* opCtx) {
return _data->empty();
}
- virtual Status touch(OperationContext* txn) const {
+ virtual Status touch(OperationContext* opCtx) const {
// already in memory...
return Status::OK();
}
class Cursor final : public SortedDataInterface::Cursor {
public:
- Cursor(OperationContext* txn, const IndexSet& data, bool isForward, bool isUnique)
- : _txn(txn), _data(data), _forward(isForward), _isUnique(isUnique), _it(data.end()) {}
+ Cursor(OperationContext* opCtx, const IndexSet& data, bool isForward, bool isUnique)
+ : _opCtx(opCtx),
+ _data(data),
+ _forward(isForward),
+ _isUnique(isUnique),
+ _it(data.end()) {}
boost::optional<IndexKeyEntry> next(RequestedInfo parts) override {
if (_lastMoveWasRestore) {
@@ -291,7 +295,7 @@ public:
void save() override {
// Keep original position if we haven't moved since the last restore.
- _txn = nullptr;
+ _opCtx = nullptr;
if (_lastMoveWasRestore)
return;
@@ -340,11 +344,11 @@ public:
}
void detachFromOperationContext() final {
- _txn = nullptr;
+ _opCtx = nullptr;
}
- void reattachToOperationContext(OperationContext* txn) final {
- _txn = txn;
+ void reattachToOperationContext(OperationContext* opCtx) final {
+ _opCtx = opCtx;
}
private:
@@ -440,7 +444,7 @@ public:
_endState->it = it;
}
- OperationContext* _txn; // not owned
+ OperationContext* _opCtx; // not owned
const IndexSet& _data;
const bool _forward;
const bool _isUnique;
@@ -466,12 +470,12 @@ public:
RecordId _savedLoc;
};
- virtual std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* txn,
+ virtual std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* opCtx,
bool isForward) const {
- return stdx::make_unique<Cursor>(txn, *_data, isForward, _isUnique);
+ return stdx::make_unique<Cursor>(opCtx, *_data, isForward, _isUnique);
}
- virtual Status initAsEmpty(OperationContext* txn) {
+ virtual Status initAsEmpty(OperationContext* opCtx) {
// No-op
return Status::OK();
}
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
index bfa58ff7e15..f2544ba5d1d 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
@@ -63,11 +63,11 @@ public:
StringData ident,
const IndexDescriptor* desc);
- virtual Status beginBackup(OperationContext* txn) {
+ virtual Status beginBackup(OperationContext* opCtx) {
return Status::OK();
}
- virtual void endBackup(OperationContext* txn) {}
+ virtual void endBackup(OperationContext* opCtx) {}
virtual Status dropIdent(OperationContext* opCtx, StringData ident);
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp
index 111242d5784..7a06bea78bc 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp
@@ -111,7 +111,7 @@ private:
class EphemeralForTestRecordStore::Cursor final : public SeekableRecordCursor {
public:
- Cursor(OperationContext* txn, const EphemeralForTestRecordStore& rs)
+ Cursor(OperationContext* opCtx, const EphemeralForTestRecordStore& rs)
: _records(rs._data->records), _isCapped(rs.isCapped()) {}
boost::optional<Record> next() final {
@@ -160,7 +160,7 @@ public:
}
void detachFromOperationContext() final {}
- void reattachToOperationContext(OperationContext* txn) final {}
+ void reattachToOperationContext(OperationContext* opCtx) final {}
private:
Records::const_iterator _it;
@@ -174,7 +174,7 @@ private:
class EphemeralForTestRecordStore::ReverseCursor final : public SeekableRecordCursor {
public:
- ReverseCursor(OperationContext* txn, const EphemeralForTestRecordStore& rs)
+ ReverseCursor(OperationContext* opCtx, const EphemeralForTestRecordStore& rs)
: _records(rs._data->records), _isCapped(rs.isCapped()) {}
boost::optional<Record> next() final {
@@ -236,7 +236,7 @@ public:
}
void detachFromOperationContext() final {}
- void reattachToOperationContext(OperationContext* txn) final {}
+ void reattachToOperationContext(OperationContext* opCtx) final {}
private:
Records::const_reverse_iterator _it;
@@ -282,7 +282,8 @@ const char* EphemeralForTestRecordStore::name() const {
return "EphemeralForTest";
}
-RecordData EphemeralForTestRecordStore::dataFor(OperationContext* txn, const RecordId& loc) const {
+RecordData EphemeralForTestRecordStore::dataFor(OperationContext* opCtx,
+ const RecordId& loc) const {
return recordFor(loc)->toRecordData();
}
@@ -308,7 +309,7 @@ EphemeralForTestRecordStore::EphemeralForTestRecord* EphemeralForTestRecordStore
return &it->second;
}
-bool EphemeralForTestRecordStore::findRecord(OperationContext* txn,
+bool EphemeralForTestRecordStore::findRecord(OperationContext* opCtx,
const RecordId& loc,
RecordData* rd) const {
Records::const_iterator it = _data->records.find(loc);
@@ -319,28 +320,28 @@ bool EphemeralForTestRecordStore::findRecord(OperationContext* txn,
return true;
}
-void EphemeralForTestRecordStore::deleteRecord(OperationContext* txn, const RecordId& loc) {
+void EphemeralForTestRecordStore::deleteRecord(OperationContext* opCtx, const RecordId& loc) {
EphemeralForTestRecord* rec = recordFor(loc);
- txn->recoveryUnit()->registerChange(new RemoveChange(_data, loc, *rec));
+ opCtx->recoveryUnit()->registerChange(new RemoveChange(_data, loc, *rec));
_data->dataSize -= rec->size;
invariant(_data->records.erase(loc) == 1);
}
-bool EphemeralForTestRecordStore::cappedAndNeedDelete(OperationContext* txn) const {
+bool EphemeralForTestRecordStore::cappedAndNeedDelete(OperationContext* opCtx) const {
if (!_isCapped)
return false;
if (_data->dataSize > _cappedMaxSize)
return true;
- if ((_cappedMaxDocs != -1) && (numRecords(txn) > _cappedMaxDocs))
+ if ((_cappedMaxDocs != -1) && (numRecords(opCtx) > _cappedMaxDocs))
return true;
return false;
}
-void EphemeralForTestRecordStore::cappedDeleteAsNeeded(OperationContext* txn) {
- while (cappedAndNeedDelete(txn)) {
+void EphemeralForTestRecordStore::cappedDeleteAsNeeded(OperationContext* opCtx) {
+ while (cappedAndNeedDelete(opCtx)) {
invariant(!_data->records.empty());
Records::iterator oldest = _data->records.begin();
@@ -348,9 +349,9 @@ void EphemeralForTestRecordStore::cappedDeleteAsNeeded(OperationContext* txn) {
RecordData data = oldest->second.toRecordData();
if (_cappedCallback)
- uassertStatusOK(_cappedCallback->aboutToDeleteCapped(txn, id, data));
+ uassertStatusOK(_cappedCallback->aboutToDeleteCapped(opCtx, id, data));
- deleteRecord(txn, id);
+ deleteRecord(opCtx, id);
}
}
@@ -366,7 +367,7 @@ StatusWith<RecordId> EphemeralForTestRecordStore::extractAndCheckLocForOplog(con
return status;
}
-StatusWith<RecordId> EphemeralForTestRecordStore::insertRecord(OperationContext* txn,
+StatusWith<RecordId> EphemeralForTestRecordStore::insertRecord(OperationContext* opCtx,
const char* data,
int len,
bool enforceQuota) {
@@ -389,16 +390,16 @@ StatusWith<RecordId> EphemeralForTestRecordStore::insertRecord(OperationContext*
loc = allocateLoc();
}
- txn->recoveryUnit()->registerChange(new InsertChange(_data, loc));
+ opCtx->recoveryUnit()->registerChange(new InsertChange(_data, loc));
_data->dataSize += len;
_data->records[loc] = rec;
- cappedDeleteAsNeeded(txn);
+ cappedDeleteAsNeeded(opCtx);
return StatusWith<RecordId>(loc);
}
-Status EphemeralForTestRecordStore::insertRecordsWithDocWriter(OperationContext* txn,
+Status EphemeralForTestRecordStore::insertRecordsWithDocWriter(OperationContext* opCtx,
const DocWriter* const* docs,
size_t nDocs,
RecordId* idsOut) {
@@ -423,11 +424,11 @@ Status EphemeralForTestRecordStore::insertRecordsWithDocWriter(OperationContext*
loc = allocateLoc();
}
- txn->recoveryUnit()->registerChange(new InsertChange(_data, loc));
+ opCtx->recoveryUnit()->registerChange(new InsertChange(_data, loc));
_data->dataSize += len;
_data->records[loc] = rec;
- cappedDeleteAsNeeded(txn);
+ cappedDeleteAsNeeded(opCtx);
if (idsOut)
idsOut[i] = loc;
@@ -436,7 +437,7 @@ Status EphemeralForTestRecordStore::insertRecordsWithDocWriter(OperationContext*
return Status::OK();
}
-Status EphemeralForTestRecordStore::updateRecord(OperationContext* txn,
+Status EphemeralForTestRecordStore::updateRecord(OperationContext* opCtx,
const RecordId& loc,
const char* data,
int len,
@@ -451,7 +452,7 @@ Status EphemeralForTestRecordStore::updateRecord(OperationContext* txn,
if (notifier) {
// The in-memory KV engine uses the invalidation framework (does not support
// doc-locking), and therefore must notify that it is updating a document.
- Status callbackStatus = notifier->recordStoreGoingToUpdateInPlace(txn, loc);
+ Status callbackStatus = notifier->recordStoreGoingToUpdateInPlace(opCtx, loc);
if (!callbackStatus.isOK()) {
return callbackStatus;
}
@@ -460,11 +461,11 @@ Status EphemeralForTestRecordStore::updateRecord(OperationContext* txn,
EphemeralForTestRecord newRecord(len);
memcpy(newRecord.data.get(), data, len);
- txn->recoveryUnit()->registerChange(new RemoveChange(_data, loc, *oldRecord));
+ opCtx->recoveryUnit()->registerChange(new RemoveChange(_data, loc, *oldRecord));
_data->dataSize += len - oldLen;
*oldRecord = newRecord;
- cappedDeleteAsNeeded(txn);
+ cappedDeleteAsNeeded(opCtx);
return Status::OK();
}
@@ -474,7 +475,7 @@ bool EphemeralForTestRecordStore::updateWithDamagesSupported() const {
}
StatusWith<RecordData> EphemeralForTestRecordStore::updateWithDamages(
- OperationContext* txn,
+ OperationContext* opCtx,
const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
@@ -485,10 +486,10 @@ StatusWith<RecordData> EphemeralForTestRecordStore::updateWithDamages(
EphemeralForTestRecord newRecord(len);
memcpy(newRecord.data.get(), oldRecord->data.get(), len);
- txn->recoveryUnit()->registerChange(new RemoveChange(_data, loc, *oldRecord));
+ opCtx->recoveryUnit()->registerChange(new RemoveChange(_data, loc, *oldRecord));
*oldRecord = newRecord;
- cappedDeleteAsNeeded(txn);
+ cappedDeleteAsNeeded(opCtx);
char* root = newRecord.data.get();
mutablebson::DamageVector::const_iterator where = damages.begin();
@@ -504,33 +505,33 @@ StatusWith<RecordData> EphemeralForTestRecordStore::updateWithDamages(
return newRecord.toRecordData();
}
-std::unique_ptr<SeekableRecordCursor> EphemeralForTestRecordStore::getCursor(OperationContext* txn,
- bool forward) const {
+std::unique_ptr<SeekableRecordCursor> EphemeralForTestRecordStore::getCursor(
+ OperationContext* opCtx, bool forward) const {
if (forward)
- return stdx::make_unique<Cursor>(txn, *this);
- return stdx::make_unique<ReverseCursor>(txn, *this);
+ return stdx::make_unique<Cursor>(opCtx, *this);
+ return stdx::make_unique<ReverseCursor>(opCtx, *this);
}
-Status EphemeralForTestRecordStore::truncate(OperationContext* txn) {
+Status EphemeralForTestRecordStore::truncate(OperationContext* opCtx) {
// Unlike other changes, TruncateChange mutates _data on construction to perform the
// truncate
- txn->recoveryUnit()->registerChange(new TruncateChange(_data));
+ opCtx->recoveryUnit()->registerChange(new TruncateChange(_data));
return Status::OK();
}
-void EphemeralForTestRecordStore::cappedTruncateAfter(OperationContext* txn,
+void EphemeralForTestRecordStore::cappedTruncateAfter(OperationContext* opCtx,
RecordId end,
bool inclusive) {
Records::iterator it =
inclusive ? _data->records.lower_bound(end) : _data->records.upper_bound(end);
while (it != _data->records.end()) {
- txn->recoveryUnit()->registerChange(new RemoveChange(_data, it->first, it->second));
+ opCtx->recoveryUnit()->registerChange(new RemoveChange(_data, it->first, it->second));
_data->dataSize -= it->second.size;
_data->records.erase(it++);
}
}
-Status EphemeralForTestRecordStore::validate(OperationContext* txn,
+Status EphemeralForTestRecordStore::validate(OperationContext* opCtx,
ValidateCmdLevel level,
ValidateAdaptor* adaptor,
ValidateResults* results,
@@ -558,7 +559,7 @@ Status EphemeralForTestRecordStore::validate(OperationContext* txn,
return Status::OK();
}
-void EphemeralForTestRecordStore::appendCustomStats(OperationContext* txn,
+void EphemeralForTestRecordStore::appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* result,
double scale) const {
result->appendBool("capped", _isCapped);
@@ -568,7 +569,7 @@ void EphemeralForTestRecordStore::appendCustomStats(OperationContext* txn,
}
}
-Status EphemeralForTestRecordStore::touch(OperationContext* txn, BSONObjBuilder* output) const {
+Status EphemeralForTestRecordStore::touch(OperationContext* opCtx, BSONObjBuilder* output) const {
if (output) {
output->append("numRanges", 1);
output->append("millis", 0);
@@ -576,18 +577,18 @@ Status EphemeralForTestRecordStore::touch(OperationContext* txn, BSONObjBuilder*
return Status::OK();
}
-void EphemeralForTestRecordStore::increaseStorageSize(OperationContext* txn,
+void EphemeralForTestRecordStore::increaseStorageSize(OperationContext* opCtx,
int size,
bool enforceQuota) {
// unclear what this would mean for this class. For now, just error if called.
invariant(!"increaseStorageSize not yet implemented");
}
-int64_t EphemeralForTestRecordStore::storageSize(OperationContext* txn,
+int64_t EphemeralForTestRecordStore::storageSize(OperationContext* opCtx,
BSONObjBuilder* extraInfo,
int infoLevel) const {
// Note: not making use of extraInfo or infoLevel since we don't have extents
- const int64_t recordOverhead = numRecords(txn) * sizeof(EphemeralForTestRecord);
+ const int64_t recordOverhead = numRecords(opCtx) * sizeof(EphemeralForTestRecord);
return _data->dataSize + recordOverhead;
}
@@ -598,7 +599,7 @@ RecordId EphemeralForTestRecordStore::allocateLoc() {
}
boost::optional<RecordId> EphemeralForTestRecordStore::oplogStartHack(
- OperationContext* txn, const RecordId& startingPosition) const {
+ OperationContext* opCtx, const RecordId& startingPosition) const {
if (!_data->isOplog)
return boost::none;
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h
index 33168e1dc99..b9d44c6905e 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h
@@ -54,23 +54,23 @@ public:
virtual const char* name() const;
- virtual RecordData dataFor(OperationContext* txn, const RecordId& loc) const;
+ virtual RecordData dataFor(OperationContext* opCtx, const RecordId& loc) const;
- virtual bool findRecord(OperationContext* txn, const RecordId& loc, RecordData* rd) const;
+ virtual bool findRecord(OperationContext* opCtx, const RecordId& loc, RecordData* rd) const;
- virtual void deleteRecord(OperationContext* txn, const RecordId& dl);
+ virtual void deleteRecord(OperationContext* opCtx, const RecordId& dl);
- virtual StatusWith<RecordId> insertRecord(OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord(OperationContext* opCtx,
const char* data,
int len,
bool enforceQuota);
- virtual Status insertRecordsWithDocWriter(OperationContext* txn,
+ virtual Status insertRecordsWithDocWriter(OperationContext* opCtx,
const DocWriter* const* docs,
size_t nDocs,
RecordId* idsOut);
- virtual Status updateRecord(OperationContext* txn,
+ virtual Status updateRecord(OperationContext* opCtx,
const RecordId& oldLocation,
const char* data,
int len,
@@ -79,51 +79,51 @@ public:
virtual bool updateWithDamagesSupported() const;
- virtual StatusWith<RecordData> updateWithDamages(OperationContext* txn,
+ virtual StatusWith<RecordData> updateWithDamages(OperationContext* opCtx,
const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages);
- std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* txn,
+ std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* opCtx,
bool forward) const final;
- virtual Status truncate(OperationContext* txn);
+ virtual Status truncate(OperationContext* opCtx);
- virtual void cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive);
+ virtual void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive);
- virtual Status validate(OperationContext* txn,
+ virtual Status validate(OperationContext* opCtx,
ValidateCmdLevel level,
ValidateAdaptor* adaptor,
ValidateResults* results,
BSONObjBuilder* output);
- virtual void appendCustomStats(OperationContext* txn,
+ virtual void appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* result,
double scale) const;
- virtual Status touch(OperationContext* txn, BSONObjBuilder* output) const;
+ virtual Status touch(OperationContext* opCtx, BSONObjBuilder* output) const;
- virtual void increaseStorageSize(OperationContext* txn, int size, bool enforceQuota);
+ virtual void increaseStorageSize(OperationContext* opCtx, int size, bool enforceQuota);
- virtual int64_t storageSize(OperationContext* txn,
+ virtual int64_t storageSize(OperationContext* opCtx,
BSONObjBuilder* extraInfo = NULL,
int infoLevel = 0) const;
- virtual long long dataSize(OperationContext* txn) const {
+ virtual long long dataSize(OperationContext* opCtx) const {
return _data->dataSize;
}
- virtual long long numRecords(OperationContext* txn) const {
+ virtual long long numRecords(OperationContext* opCtx) const {
return _data->records.size();
}
- virtual boost::optional<RecordId> oplogStartHack(OperationContext* txn,
+ virtual boost::optional<RecordId> oplogStartHack(OperationContext* opCtx,
const RecordId& startingPosition) const;
- void waitForAllEarlierOplogWritesToBeVisible(OperationContext* txn) const override {}
+ void waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx) const override {}
- virtual void updateStatsAfterRepair(OperationContext* txn,
+ virtual void updateStatsAfterRepair(OperationContext* opCtx,
long long numRecords,
long long dataSize) {
invariant(_data->records.size() == size_t(numRecords));
@@ -179,8 +179,8 @@ private:
StatusWith<RecordId> extractAndCheckLocForOplog(const char* data, int len) const;
RecordId allocateLoc();
- bool cappedAndNeedDelete(OperationContext* txn) const;
- void cappedDeleteAsNeeded(OperationContext* txn);
+ bool cappedAndNeedDelete(OperationContext* opCtx) const;
+ void cappedDeleteAsNeeded(OperationContext* opCtx);
// TODO figure out a proper solution to metadata
const bool _isCapped;
diff --git a/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp b/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
index 9f6e051f140..7ab0d15beb0 100644
--- a/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
+++ b/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
@@ -96,10 +96,10 @@ KVCollectionCatalogEntry::KVCollectionCatalogEntry(KVEngine* engine,
KVCollectionCatalogEntry::~KVCollectionCatalogEntry() {}
-bool KVCollectionCatalogEntry::setIndexIsMultikey(OperationContext* txn,
+bool KVCollectionCatalogEntry::setIndexIsMultikey(OperationContext* opCtx,
StringData indexName,
const MultikeyPaths& multikeyPaths) {
- MetaData md = _getMetaData(txn);
+ MetaData md = _getMetaData(opCtx);
int offset = md.findIndexOffset(indexName);
invariant(offset >= 0);
@@ -146,45 +146,45 @@ bool KVCollectionCatalogEntry::setIndexIsMultikey(OperationContext* txn,
}
}
- _catalog->putMetaData(txn, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns().toString(), md);
return true;
}
-void KVCollectionCatalogEntry::setIndexHead(OperationContext* txn,
+void KVCollectionCatalogEntry::setIndexHead(OperationContext* opCtx,
StringData indexName,
const RecordId& newHead) {
- MetaData md = _getMetaData(txn);
+ MetaData md = _getMetaData(opCtx);
int offset = md.findIndexOffset(indexName);
invariant(offset >= 0);
md.indexes[offset].head = newHead;
- _catalog->putMetaData(txn, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns().toString(), md);
}
-Status KVCollectionCatalogEntry::removeIndex(OperationContext* txn, StringData indexName) {
- MetaData md = _getMetaData(txn);
+Status KVCollectionCatalogEntry::removeIndex(OperationContext* opCtx, StringData indexName) {
+ MetaData md = _getMetaData(opCtx);
if (md.findIndexOffset(indexName) < 0)
return Status::OK(); // never had the index so nothing to do.
- const string ident = _catalog->getIndexIdent(txn, ns().ns(), indexName);
+ const string ident = _catalog->getIndexIdent(opCtx, ns().ns(), indexName);
md.eraseIndex(indexName);
- _catalog->putMetaData(txn, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns().toString(), md);
// Lazily remove to isolate underlying engine from rollback.
- txn->recoveryUnit()->registerChange(new RemoveIndexChange(txn, this, ident));
+ opCtx->recoveryUnit()->registerChange(new RemoveIndexChange(opCtx, this, ident));
return Status::OK();
}
-Status KVCollectionCatalogEntry::prepareForIndexBuild(OperationContext* txn,
+Status KVCollectionCatalogEntry::prepareForIndexBuild(OperationContext* opCtx,
const IndexDescriptor* spec) {
- MetaData md = _getMetaData(txn);
+ MetaData md = _getMetaData(opCtx);
IndexMetaData imd(spec->infoObj(), false, RecordId(), false);
if (indexTypeSupportsPathLevelMultikeyTracking(spec->getAccessMethodName())) {
const auto feature =
KVCatalog::FeatureTracker::RepairableFeature::kPathLevelMultikeyTracking;
- if (!_catalog->getFeatureTracker()->isRepairableFeatureInUse(txn, feature)) {
- _catalog->getFeatureTracker()->markRepairableFeatureAsInUse(txn, feature);
+ if (!_catalog->getFeatureTracker()->isRepairableFeatureInUse(opCtx, feature)) {
+ _catalog->getFeatureTracker()->markRepairableFeatureAsInUse(opCtx, feature);
}
imd.multikeyPaths = MultikeyPaths{static_cast<size_t>(spec->keyPattern().nFields())};
}
@@ -192,62 +192,62 @@ Status KVCollectionCatalogEntry::prepareForIndexBuild(OperationContext* txn,
// Mark collation feature as in use if the index has a non-simple collation.
if (imd.spec["collation"]) {
const auto feature = KVCatalog::FeatureTracker::NonRepairableFeature::kCollation;
- if (!_catalog->getFeatureTracker()->isNonRepairableFeatureInUse(txn, feature)) {
- _catalog->getFeatureTracker()->markNonRepairableFeatureAsInUse(txn, feature);
+ if (!_catalog->getFeatureTracker()->isNonRepairableFeatureInUse(opCtx, feature)) {
+ _catalog->getFeatureTracker()->markNonRepairableFeatureAsInUse(opCtx, feature);
}
}
md.indexes.push_back(imd);
- _catalog->putMetaData(txn, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns().toString(), md);
- string ident = _catalog->getIndexIdent(txn, ns().ns(), spec->indexName());
+ string ident = _catalog->getIndexIdent(opCtx, ns().ns(), spec->indexName());
- const Status status = _engine->createSortedDataInterface(txn, ident, spec);
+ const Status status = _engine->createSortedDataInterface(opCtx, ident, spec);
if (status.isOK()) {
- txn->recoveryUnit()->registerChange(new AddIndexChange(txn, this, ident));
+ opCtx->recoveryUnit()->registerChange(new AddIndexChange(opCtx, this, ident));
}
return status;
}
-void KVCollectionCatalogEntry::indexBuildSuccess(OperationContext* txn, StringData indexName) {
- MetaData md = _getMetaData(txn);
+void KVCollectionCatalogEntry::indexBuildSuccess(OperationContext* opCtx, StringData indexName) {
+ MetaData md = _getMetaData(opCtx);
int offset = md.findIndexOffset(indexName);
invariant(offset >= 0);
md.indexes[offset].ready = true;
- _catalog->putMetaData(txn, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns().toString(), md);
}
-void KVCollectionCatalogEntry::updateTTLSetting(OperationContext* txn,
+void KVCollectionCatalogEntry::updateTTLSetting(OperationContext* opCtx,
StringData idxName,
long long newExpireSeconds) {
- MetaData md = _getMetaData(txn);
+ MetaData md = _getMetaData(opCtx);
int offset = md.findIndexOffset(idxName);
invariant(offset >= 0);
md.indexes[offset].updateTTLSetting(newExpireSeconds);
- _catalog->putMetaData(txn, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns().toString(), md);
}
-void KVCollectionCatalogEntry::updateFlags(OperationContext* txn, int newValue) {
- MetaData md = _getMetaData(txn);
+void KVCollectionCatalogEntry::updateFlags(OperationContext* opCtx, int newValue) {
+ MetaData md = _getMetaData(opCtx);
md.options.flags = newValue;
md.options.flagsSet = true;
- _catalog->putMetaData(txn, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns().toString(), md);
}
-void KVCollectionCatalogEntry::updateValidator(OperationContext* txn,
+void KVCollectionCatalogEntry::updateValidator(OperationContext* opCtx,
const BSONObj& validator,
StringData validationLevel,
StringData validationAction) {
- MetaData md = _getMetaData(txn);
+ MetaData md = _getMetaData(opCtx);
md.options.validator = validator;
md.options.validationLevel = validationLevel.toString();
md.options.validationAction = validationAction.toString();
- _catalog->putMetaData(txn, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns().toString(), md);
}
BSONCollectionCatalogEntry::MetaData KVCollectionCatalogEntry::_getMetaData(
- OperationContext* txn) const {
- return _catalog->getMetaData(txn, ns().toString());
+ OperationContext* opCtx) const {
+ return _catalog->getMetaData(opCtx, ns().toString());
}
}
diff --git a/src/mongo/db/storage/kv/kv_collection_catalog_entry.h b/src/mongo/db/storage/kv/kv_collection_catalog_entry.h
index 430e12a3ae2..c999bee1fe5 100644
--- a/src/mongo/db/storage/kv/kv_collection_catalog_entry.h
+++ b/src/mongo/db/storage/kv/kv_collection_catalog_entry.h
@@ -55,25 +55,25 @@ public:
return 64;
};
- bool setIndexIsMultikey(OperationContext* txn,
+ bool setIndexIsMultikey(OperationContext* opCtx,
StringData indexName,
const MultikeyPaths& multikeyPaths) final;
- void setIndexHead(OperationContext* txn, StringData indexName, const RecordId& newHead) final;
+ void setIndexHead(OperationContext* opCtx, StringData indexName, const RecordId& newHead) final;
- Status removeIndex(OperationContext* txn, StringData indexName) final;
+ Status removeIndex(OperationContext* opCtx, StringData indexName) final;
- Status prepareForIndexBuild(OperationContext* txn, const IndexDescriptor* spec) final;
+ Status prepareForIndexBuild(OperationContext* opCtx, const IndexDescriptor* spec) final;
- void indexBuildSuccess(OperationContext* txn, StringData indexName) final;
+ void indexBuildSuccess(OperationContext* opCtx, StringData indexName) final;
- void updateTTLSetting(OperationContext* txn,
+ void updateTTLSetting(OperationContext* opCtx,
StringData idxName,
long long newExpireSeconds) final;
- void updateFlags(OperationContext* txn, int newValue) final;
+ void updateFlags(OperationContext* opCtx, int newValue) final;
- void updateValidator(OperationContext* txn,
+ void updateValidator(OperationContext* opCtx,
const BSONObj& validator,
StringData validationLevel,
StringData validationAction) final;
@@ -86,7 +86,7 @@ public:
}
protected:
- MetaData _getMetaData(OperationContext* txn) const final;
+ MetaData _getMetaData(OperationContext* opCtx) const final;
private:
class AddIndexChange;
diff --git a/src/mongo/db/storage/kv/kv_database_catalog_entry.cpp b/src/mongo/db/storage/kv/kv_database_catalog_entry.cpp
index f3b5d8579e6..a0dfc0e7559 100644
--- a/src/mongo/db/storage/kv/kv_database_catalog_entry.cpp
+++ b/src/mongo/db/storage/kv/kv_database_catalog_entry.cpp
@@ -52,7 +52,7 @@ auto mongo::defaultDatabaseCatalogEntryFactory(const StringData name, KVStorageE
namespace mongo {
-IndexAccessMethod* KVDatabaseCatalogEntry::getIndex(OperationContext* txn,
+IndexAccessMethod* KVDatabaseCatalogEntry::getIndex(OperationContext* opCtx,
const CollectionCatalogEntry* collection,
IndexCatalogEntry* index) {
IndexDescriptor* desc = index->descriptor();
@@ -60,9 +60,9 @@ IndexAccessMethod* KVDatabaseCatalogEntry::getIndex(OperationContext* txn,
const std::string& type = desc->getAccessMethodName();
std::string ident =
- _engine->getCatalog()->getIndexIdent(txn, collection->ns().ns(), desc->indexName());
+ _engine->getCatalog()->getIndexIdent(opCtx, collection->ns().ns(), desc->indexName());
- SortedDataInterface* sdi = _engine->getEngine()->getSortedDataInterface(txn, ident, desc);
+ SortedDataInterface* sdi = _engine->getEngine()->getSortedDataInterface(opCtx, ident, desc);
if ("" == type)
return new BtreeAccessMethod(index, sdi);
diff --git a/src/mongo/db/storage/kv/kv_database_catalog_entry.h b/src/mongo/db/storage/kv/kv_database_catalog_entry.h
index eb2c9ddb11c..3fe64a3da11 100644
--- a/src/mongo/db/storage/kv/kv_database_catalog_entry.h
+++ b/src/mongo/db/storage/kv/kv_database_catalog_entry.h
@@ -36,7 +36,7 @@ class KVDatabaseCatalogEntry : public KVDatabaseCatalogEntryBase {
public:
using KVDatabaseCatalogEntryBase::KVDatabaseCatalogEntryBase;
- IndexAccessMethod* getIndex(OperationContext* txn,
+ IndexAccessMethod* getIndex(OperationContext* opCtx,
const CollectionCatalogEntry* collection,
IndexCatalogEntry* index) final;
};
diff --git a/src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp b/src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp
index 717fae44ca9..76031197e80 100644
--- a/src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp
+++ b/src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp
@@ -191,11 +191,11 @@ RecordStore* KVDatabaseCatalogEntryBase::getRecordStore(StringData ns) const {
return it->second->getRecordStore();
}
-Status KVDatabaseCatalogEntryBase::createCollection(OperationContext* txn,
+Status KVDatabaseCatalogEntryBase::createCollection(OperationContext* opCtx,
StringData ns,
const CollectionOptions& options,
bool allocateDefaultSpace) {
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
if (ns.empty()) {
return Status(ErrorCodes::BadValue, "Collection namespace cannot be empty");
@@ -207,28 +207,29 @@ Status KVDatabaseCatalogEntryBase::createCollection(OperationContext* txn,
}
// need to create it
- Status status = _engine->getCatalog()->newCollection(txn, ns, options);
+ Status status = _engine->getCatalog()->newCollection(opCtx, ns, options);
if (!status.isOK())
return status;
string ident = _engine->getCatalog()->getCollectionIdent(ns);
- status = _engine->getEngine()->createRecordStore(txn, ns, ident, options);
+ status = _engine->getEngine()->createRecordStore(opCtx, ns, ident, options);
if (!status.isOK())
return status;
// Mark collation feature as in use if the collection has a non-simple default collation.
if (!options.collation.isEmpty()) {
const auto feature = KVCatalog::FeatureTracker::NonRepairableFeature::kCollation;
- if (_engine->getCatalog()->getFeatureTracker()->isNonRepairableFeatureInUse(txn, feature)) {
- _engine->getCatalog()->getFeatureTracker()->markNonRepairableFeatureAsInUse(txn,
+ if (_engine->getCatalog()->getFeatureTracker()->isNonRepairableFeatureInUse(opCtx,
+ feature)) {
+ _engine->getCatalog()->getFeatureTracker()->markNonRepairableFeatureAsInUse(opCtx,
feature);
}
}
- txn->recoveryUnit()->registerChange(new AddCollectionChange(txn, this, ns, ident, true));
+ opCtx->recoveryUnit()->registerChange(new AddCollectionChange(opCtx, this, ns, ident, true));
- auto rs = _engine->getEngine()->getRecordStore(txn, ns, ident, options);
+ auto rs = _engine->getEngine()->getRecordStore(opCtx, ns, ident, options);
invariant(rs);
_collections[ns.toString()] = new KVCollectionCatalogEntry(
@@ -272,11 +273,11 @@ void KVDatabaseCatalogEntryBase::reinitCollectionAfterRepair(OperationContext* o
initCollection(opCtx, ns, false);
}
-Status KVDatabaseCatalogEntryBase::renameCollection(OperationContext* txn,
+Status KVDatabaseCatalogEntryBase::renameCollection(OperationContext* opCtx,
StringData fromNS,
StringData toNS,
bool stayTemp) {
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
RecordStore* originalRS = NULL;
@@ -294,11 +295,11 @@ Status KVDatabaseCatalogEntryBase::renameCollection(OperationContext* txn,
const std::string identFrom = _engine->getCatalog()->getCollectionIdent(fromNS);
- Status status = _engine->getEngine()->okToRename(txn, fromNS, toNS, identFrom, originalRS);
+ Status status = _engine->getEngine()->okToRename(opCtx, fromNS, toNS, identFrom, originalRS);
if (!status.isOK())
return status;
- status = _engine->getCatalog()->renameCollection(txn, fromNS, toNS, stayTemp);
+ status = _engine->getCatalog()->renameCollection(opCtx, fromNS, toNS, stayTemp);
if (!status.isOK())
return status;
@@ -306,17 +307,18 @@ Status KVDatabaseCatalogEntryBase::renameCollection(OperationContext* txn,
invariant(identFrom == identTo);
- BSONCollectionCatalogEntry::MetaData md = _engine->getCatalog()->getMetaData(txn, toNS);
+ BSONCollectionCatalogEntry::MetaData md = _engine->getCatalog()->getMetaData(opCtx, toNS);
const CollectionMap::iterator itFrom = _collections.find(fromNS.toString());
invariant(itFrom != _collections.end());
- txn->recoveryUnit()->registerChange(
- new RemoveCollectionChange(txn, this, fromNS, identFrom, itFrom->second, false));
+ opCtx->recoveryUnit()->registerChange(
+ new RemoveCollectionChange(opCtx, this, fromNS, identFrom, itFrom->second, false));
_collections.erase(itFrom);
- txn->recoveryUnit()->registerChange(new AddCollectionChange(txn, this, toNS, identTo, false));
+ opCtx->recoveryUnit()->registerChange(
+ new AddCollectionChange(opCtx, this, toNS, identTo, false));
- auto rs = _engine->getEngine()->getRecordStore(txn, toNS, identTo, md.options);
+ auto rs = _engine->getEngine()->getRecordStore(opCtx, toNS, identTo, md.options);
_collections[toNS.toString()] = new KVCollectionCatalogEntry(
_engine->getEngine(), _engine->getCatalog(), toNS, identTo, std::move(rs));
diff --git a/src/mongo/db/storage/kv/kv_database_catalog_entry_base.h b/src/mongo/db/storage/kv/kv_database_catalog_entry_base.h
index 4d3d50a20e4..bd0e4ec3927 100644
--- a/src/mongo/db/storage/kv/kv_database_catalog_entry_base.h
+++ b/src/mongo/db/storage/kv/kv_database_catalog_entry_base.h
@@ -67,16 +67,16 @@ public:
RecordStore* getRecordStore(StringData ns) const override;
- IndexAccessMethod* getIndex(OperationContext* txn,
+ IndexAccessMethod* getIndex(OperationContext* opCtx,
const CollectionCatalogEntry* collection,
IndexCatalogEntry* index) override = 0;
- Status createCollection(OperationContext* txn,
+ Status createCollection(OperationContext* opCtx,
StringData ns,
const CollectionOptions& options,
bool allocateDefaultSpace) override;
- Status renameCollection(OperationContext* txn,
+ Status renameCollection(OperationContext* opCtx,
StringData fromNS,
StringData toNS,
bool stayTemp) override;
diff --git a/src/mongo/db/storage/kv/kv_database_catalog_entry_mock.cpp b/src/mongo/db/storage/kv/kv_database_catalog_entry_mock.cpp
index 422b36ed58a..dc9da47492b 100644
--- a/src/mongo/db/storage/kv/kv_database_catalog_entry_mock.cpp
+++ b/src/mongo/db/storage/kv/kv_database_catalog_entry_mock.cpp
@@ -42,6 +42,6 @@ std::unique_ptr<mongo::KVDatabaseCatalogEntryMock> mongo::kvDatabaseCatalogEntry
// Used to satisfy link dependencies in unit test - not invoked.
mongo::IndexAccessMethod* mongo::KVDatabaseCatalogEntryMock::getIndex(
- OperationContext* txn, const CollectionCatalogEntry* collection, IndexCatalogEntry* index) {
+ OperationContext* opCtx, const CollectionCatalogEntry* collection, IndexCatalogEntry* index) {
invariant(false);
}
diff --git a/src/mongo/db/storage/kv/kv_database_catalog_entry_mock.h b/src/mongo/db/storage/kv/kv_database_catalog_entry_mock.h
index 54def3f9b5a..840800dfaa4 100644
--- a/src/mongo/db/storage/kv/kv_database_catalog_entry_mock.h
+++ b/src/mongo/db/storage/kv/kv_database_catalog_entry_mock.h
@@ -39,7 +39,7 @@ class KVDatabaseCatalogEntryMock : public KVDatabaseCatalogEntryBase {
public:
using KVDatabaseCatalogEntryBase::KVDatabaseCatalogEntryBase;
- IndexAccessMethod* getIndex(OperationContext* txn,
+ IndexAccessMethod* getIndex(OperationContext* opCtx,
const CollectionCatalogEntry* collection,
IndexCatalogEntry* index) final;
};
diff --git a/src/mongo/db/storage/kv/kv_engine.h b/src/mongo/db/storage/kv/kv_engine.h
index 04c5acb1dfa..f76ef68bb0e 100644
--- a/src/mongo/db/storage/kv/kv_engine.h
+++ b/src/mongo/db/storage/kv/kv_engine.h
@@ -91,14 +91,14 @@ public:
virtual Status dropIdent(OperationContext* opCtx, StringData ident) = 0;
// optional
- virtual int flushAllFiles(OperationContext* txn, bool sync) {
+ virtual int flushAllFiles(OperationContext* opCtx, bool sync) {
return 0;
}
/**
* See StorageEngine::beginBackup for details
*/
- virtual Status beginBackup(OperationContext* txn) {
+ virtual Status beginBackup(OperationContext* opCtx) {
return Status(ErrorCodes::CommandNotSupported,
"The current storage engine doesn't support backup mode");
}
@@ -106,7 +106,7 @@ public:
/**
* See StorageEngine::endBackup for details
*/
- virtual void endBackup(OperationContext* txn) {
+ virtual void endBackup(OperationContext* opCtx) {
MONGO_UNREACHABLE;
}
diff --git a/src/mongo/db/storage/kv/kv_engine_test_snapshots.cpp b/src/mongo/db/storage/kv/kv_engine_test_snapshots.cpp
index ba97e3af5f8..11cad9890d1 100644
--- a/src/mongo/db/storage/kv/kv_engine_test_snapshots.cpp
+++ b/src/mongo/db/storage/kv/kv_engine_test_snapshots.cpp
@@ -52,36 +52,37 @@ public:
public:
Operation() = default;
Operation(ServiceContext::UniqueClient client, RecoveryUnit* ru)
- : _client(std::move(client)), _txn(_client->makeOperationContext()) {
- delete _txn->releaseRecoveryUnit();
- _txn->setRecoveryUnit(ru, OperationContext::kNotInUnitOfWork);
+ : _client(std::move(client)), _opCtx(_client->makeOperationContext()) {
+ delete _opCtx->releaseRecoveryUnit();
+ _opCtx->setRecoveryUnit(ru, OperationContext::kNotInUnitOfWork);
}
Operation(Operation&& other) = default;
Operation& operator=(Operation&& other) {
- // Need to assign to _txn first if active. Otherwise we'd destroy _client before _txn.
- _txn = std::move(other._txn);
+ // Need to assign to _opCtx first if active. Otherwise we'd destroy _client before
+ // _opCtx.
+ _opCtx = std::move(other._opCtx);
_client = std::move(other._client);
return *this;
}
OperationContext& operator*() const {
- return *_txn;
+ return *_opCtx;
}
OperationContext* operator->() const {
- return _txn.get();
+ return _opCtx.get();
}
operator OperationContext*() const {
- return _txn.get();
+ return _opCtx.get();
}
private:
ServiceContext::UniqueClient _client;
- ServiceContext::UniqueOperationContext _txn;
+ ServiceContext::UniqueOperationContext _opCtx;
};
Operation makeOperation() {
@@ -104,8 +105,8 @@ public:
return createSnapshot();
}
- RecordId insertRecord(OperationContext* txn, std::string contents = "abcd") {
- auto id = rs->insertRecord(txn, contents.c_str(), contents.length() + 1, false);
+ RecordId insertRecord(OperationContext* opCtx, std::string contents = "abcd") {
+ auto id = rs->insertRecord(opCtx, contents.c_str(), contents.length() + 1, false);
ASSERT_OK(id);
return id.getValue();
}
@@ -136,8 +137,8 @@ public:
/**
* Returns the number of records seen iterating rs using the passed-in OperationContext.
*/
- int itCountOn(OperationContext* txn) {
- auto cursor = rs->getCursor(txn);
+ int itCountOn(OperationContext* opCtx) {
+ auto cursor = rs->getCursor(opCtx);
int count = 0;
while (auto record = cursor->next()) {
count++;
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.cpp b/src/mongo/db/storage/kv/kv_storage_engine.cpp
index ee02c447bdf..e74a60353a4 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.cpp
+++ b/src/mongo/db/storage/kv/kv_storage_engine.cpp
@@ -208,12 +208,12 @@ KVDatabaseCatalogEntryBase* KVStorageEngine::getDatabaseCatalogEntry(OperationCo
return db;
}
-Status KVStorageEngine::closeDatabase(OperationContext* txn, StringData db) {
+Status KVStorageEngine::closeDatabase(OperationContext* opCtx, StringData db) {
// This is ok to be a no-op as there is no database layer in kv.
return Status::OK();
}
-Status KVStorageEngine::dropDatabase(OperationContext* txn, StringData db) {
+Status KVStorageEngine::dropDatabase(OperationContext* opCtx, StringData db) {
KVDatabaseCatalogEntryBase* entry;
{
stdx::lock_guard<stdx::mutex> lk(_dbsLock);
@@ -228,14 +228,14 @@ Status KVStorageEngine::dropDatabase(OperationContext* txn, StringData db) {
// wherever possible. Eventually we want to move this up so that it can include the logOp
// inside of the WUOW, but that would require making DB dropping happen inside the Dur
// system for MMAPv1.
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
std::list<std::string> toDrop;
entry->getCollectionNamespaces(&toDrop);
for (std::list<std::string>::iterator it = toDrop.begin(); it != toDrop.end(); ++it) {
string coll = *it;
- entry->dropCollection(txn, coll);
+ entry->dropCollection(opCtx, coll);
}
toDrop.clear();
entry->getCollectionNamespaces(&toDrop);
@@ -243,7 +243,7 @@ Status KVStorageEngine::dropDatabase(OperationContext* txn, StringData db) {
{
stdx::lock_guard<stdx::mutex> lk(_dbsLock);
- txn->recoveryUnit()->registerChange(new RemoveDBChange(this, db, entry));
+ opCtx->recoveryUnit()->registerChange(new RemoveDBChange(this, db, entry));
_dbs.erase(db.toString());
}
@@ -251,24 +251,24 @@ Status KVStorageEngine::dropDatabase(OperationContext* txn, StringData db) {
return Status::OK();
}
-int KVStorageEngine::flushAllFiles(OperationContext* txn, bool sync) {
- return _engine->flushAllFiles(txn, sync);
+int KVStorageEngine::flushAllFiles(OperationContext* opCtx, bool sync) {
+ return _engine->flushAllFiles(opCtx, sync);
}
-Status KVStorageEngine::beginBackup(OperationContext* txn) {
+Status KVStorageEngine::beginBackup(OperationContext* opCtx) {
// We should not proceed if we are already in backup mode
if (_inBackupMode)
return Status(ErrorCodes::BadValue, "Already in Backup Mode");
- Status status = _engine->beginBackup(txn);
+ Status status = _engine->beginBackup(opCtx);
if (status.isOK())
_inBackupMode = true;
return status;
}
-void KVStorageEngine::endBackup(OperationContext* txn) {
+void KVStorageEngine::endBackup(OperationContext* opCtx) {
// We should never reach here if we aren't already in backup mode
invariant(_inBackupMode);
- _engine->endBackup(txn);
+ _engine->endBackup(opCtx);
_inBackupMode = false;
}
@@ -284,12 +284,12 @@ SnapshotManager* KVStorageEngine::getSnapshotManager() const {
return _engine->getSnapshotManager();
}
-Status KVStorageEngine::repairRecordStore(OperationContext* txn, const std::string& ns) {
- Status status = _engine->repairIdent(txn, _catalog->getCollectionIdent(ns));
+Status KVStorageEngine::repairRecordStore(OperationContext* opCtx, const std::string& ns) {
+ Status status = _engine->repairIdent(opCtx, _catalog->getCollectionIdent(ns));
if (!status.isOK())
return status;
- _dbs[nsToDatabase(ns)]->reinitCollectionAfterRepair(txn, ns);
+ _dbs[nsToDatabase(ns)]->reinitCollectionAfterRepair(opCtx, ns);
return Status::OK();
}
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.h b/src/mongo/db/storage/kv/kv_storage_engine.h
index ba656ae85c1..800f698b34a 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.h
+++ b/src/mongo/db/storage/kv/kv_storage_engine.h
@@ -89,21 +89,21 @@ public:
return _supportsDocLocking;
}
- virtual Status closeDatabase(OperationContext* txn, StringData db);
+ virtual Status closeDatabase(OperationContext* opCtx, StringData db);
- virtual Status dropDatabase(OperationContext* txn, StringData db);
+ virtual Status dropDatabase(OperationContext* opCtx, StringData db);
- virtual int flushAllFiles(OperationContext* txn, bool sync);
+ virtual int flushAllFiles(OperationContext* opCtx, bool sync);
- virtual Status beginBackup(OperationContext* txn);
+ virtual Status beginBackup(OperationContext* opCtx);
- virtual void endBackup(OperationContext* txn);
+ virtual void endBackup(OperationContext* opCtx);
virtual bool isDurable() const;
virtual bool isEphemeral() const;
- virtual Status repairRecordStore(OperationContext* txn, const std::string& ns);
+ virtual Status repairRecordStore(OperationContext* opCtx, const std::string& ns);
virtual void cleanShutdown();
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp
index c2e8d710062..14a3e57503b 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp
@@ -81,57 +81,57 @@ public:
virtual ~BtreeInterfaceImpl() {}
- virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn, bool dupsAllowed) {
- return new BtreeBuilderInterfaceImpl<OnDiskFormat>(txn,
- _btree->newBuilder(txn, dupsAllowed));
+ virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* opCtx, bool dupsAllowed) {
+ return new BtreeBuilderInterfaceImpl<OnDiskFormat>(opCtx,
+ _btree->newBuilder(opCtx, dupsAllowed));
}
- virtual Status insert(OperationContext* txn,
+ virtual Status insert(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
bool dupsAllowed) {
- return _btree->insert(txn, key, DiskLoc::fromRecordId(loc), dupsAllowed);
+ return _btree->insert(opCtx, key, DiskLoc::fromRecordId(loc), dupsAllowed);
}
- virtual void unindex(OperationContext* txn,
+ virtual void unindex(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
bool dupsAllowed) {
- _btree->unindex(txn, key, DiskLoc::fromRecordId(loc));
+ _btree->unindex(opCtx, key, DiskLoc::fromRecordId(loc));
}
- virtual void fullValidate(OperationContext* txn,
+ virtual void fullValidate(OperationContext* opCtx,
long long* numKeysOut,
ValidateResults* fullResults) const {
- *numKeysOut = _btree->fullValidate(txn, NULL, false, false, 0);
+ *numKeysOut = _btree->fullValidate(opCtx, NULL, false, false, 0);
}
- virtual bool appendCustomStats(OperationContext* txn,
+ virtual bool appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* output,
double scale) const {
return false;
}
- virtual long long getSpaceUsedBytes(OperationContext* txn) const {
- return _btree->getRecordStore()->dataSize(txn);
+ virtual long long getSpaceUsedBytes(OperationContext* opCtx) const {
+ return _btree->getRecordStore()->dataSize(opCtx);
}
- virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& loc) {
- return _btree->dupKeyCheck(txn, key, DiskLoc::fromRecordId(loc));
+ virtual Status dupKeyCheck(OperationContext* opCtx, const BSONObj& key, const RecordId& loc) {
+ return _btree->dupKeyCheck(opCtx, key, DiskLoc::fromRecordId(loc));
}
- virtual bool isEmpty(OperationContext* txn) {
- return _btree->isEmpty(txn);
+ virtual bool isEmpty(OperationContext* opCtx) {
+ return _btree->isEmpty(opCtx);
}
- virtual Status touch(OperationContext* txn) const {
- return _btree->touch(txn);
+ virtual Status touch(OperationContext* opCtx) const {
+ return _btree->touch(opCtx);
}
class Cursor final : public SortedDataInterface::Cursor {
public:
- Cursor(OperationContext* txn, const BtreeLogic<OnDiskFormat>* btree, bool forward)
- : _txn(txn), _btree(btree), _direction(forward ? 1 : -1), _ofs(0) {}
+ Cursor(OperationContext* opCtx, const BtreeLogic<OnDiskFormat>* btree, bool forward)
+ : _opCtx(opCtx), _btree(btree), _direction(forward ? 1 : -1), _ofs(0) {}
boost::optional<IndexKeyEntry> next(RequestedInfo parts) override {
if (isEOF())
@@ -140,7 +140,7 @@ public:
// Return current position rather than advancing.
_lastMoveWasRestore = false;
} else {
- _btree->advance(_txn, &_bucket, &_ofs, _direction);
+ _btree->advance(_opCtx, &_bucket, &_ofs, _direction);
}
if (atEndPoint())
@@ -186,12 +186,12 @@ public:
if (canUseAdvanceTo) {
// This takes advantage of current location.
- _btree->advanceTo(_txn, &_bucket, &_ofs, seekPoint, _direction);
+ _btree->advanceTo(_opCtx, &_bucket, &_ofs, seekPoint, _direction);
} else {
// Start at root.
- _bucket = _btree->getHead(_txn);
+ _bucket = _btree->getHead(_opCtx);
_ofs = 0;
- _btree->customLocate(_txn, &_bucket, &_ofs, seekPoint, _direction);
+ _btree->customLocate(_opCtx, &_bucket, &_ofs, seekPoint, _direction);
}
_lastMoveWasRestore = false;
@@ -239,7 +239,8 @@ public:
if (_btree->savedCursors()->unregisterCursor(&_saved)) {
// We can use the fast restore mechanism.
- _btree->restorePosition(_txn, _saved.key, _saved.loc, _direction, &_bucket, &_ofs);
+ _btree->restorePosition(
+ _opCtx, _saved.key, _saved.loc, _direction, &_bucket, &_ofs);
} else {
// Need to find our position from the root.
locate(_saved.key, _saved.loc.toRecordId());
@@ -251,11 +252,11 @@ public:
}
void detachFromOperationContext() final {
- _txn = nullptr;
+ _opCtx = nullptr;
}
- void reattachToOperationContext(OperationContext* txn) final {
- _txn = txn;
+ void reattachToOperationContext(OperationContext* opCtx) final {
+ _opCtx = opCtx;
}
private:
@@ -288,7 +289,7 @@ public:
}
void locate(const BSONObj& key, const RecordId& loc) {
- _btree->locate(_txn, key, DiskLoc::fromRecordId(loc), _direction, &_ofs, &_bucket);
+ _btree->locate(_opCtx, key, DiskLoc::fromRecordId(loc), _direction, &_ofs, &_bucket);
if (atOrPastEndPointAfterSeeking())
markEOF();
}
@@ -301,16 +302,16 @@ public:
}
BSONObj getKey() const {
- return _btree->getKey(_txn, _bucket, _ofs);
+ return _btree->getKey(_opCtx, _bucket, _ofs);
}
DiskLoc getDiskLoc() const {
- return _btree->getDiskLoc(_txn, _bucket, _ofs);
+ return _btree->getDiskLoc(_opCtx, _bucket, _ofs);
}
void seekEndCursor() {
if (!_endState)
return;
- _btree->locate(_txn,
+ _btree->locate(_opCtx,
_endState->key,
forward() == _endState->inclusive ? DiskLoc::max() : DiskLoc::min(),
_direction,
@@ -322,7 +323,7 @@ public:
return _direction == 1;
}
- OperationContext* _txn; // not owned
+ OperationContext* _opCtx; // not owned
const BtreeLogic<OnDiskFormat>* const _btree;
const int _direction;
@@ -347,29 +348,29 @@ public:
SavedCursorRegistry::SavedCursor _saved;
};
- virtual std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* txn,
+ virtual std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* opCtx,
bool isForward = true) const {
- return stdx::make_unique<Cursor>(txn, _btree.get(), isForward);
+ return stdx::make_unique<Cursor>(opCtx, _btree.get(), isForward);
}
class RandomCursor final : public SortedDataInterface::Cursor {
public:
- RandomCursor(OperationContext* txn, const BtreeLogic<OnDiskFormat>* btree)
- : _txn(txn), _btree(btree) {}
+ RandomCursor(OperationContext* opCtx, const BtreeLogic<OnDiskFormat>* btree)
+ : _opCtx(opCtx), _btree(btree) {}
boost::optional<IndexKeyEntry> next(RequestedInfo parts) override {
- if (_btree->isEmpty(_txn)) {
+ if (_btree->isEmpty(_opCtx)) {
return {};
}
- return _btree->getRandomEntry(_txn);
+ return _btree->getRandomEntry(_opCtx);
}
void detachFromOperationContext() final {
- _txn = nullptr;
+ _opCtx = nullptr;
}
- void reattachToOperationContext(OperationContext* txn) final {
- _txn = txn;
+ void reattachToOperationContext(OperationContext* opCtx) final {
+ _opCtx = opCtx;
}
//
@@ -396,17 +397,17 @@ public:
void restore() override {}
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
const BtreeLogic<OnDiskFormat>* const _btree;
};
virtual std::unique_ptr<SortedDataInterface::Cursor> newRandomCursor(
- OperationContext* txn) const {
- return stdx::make_unique<RandomCursor>(txn, _btree.get());
+ OperationContext* opCtx) const {
+ return stdx::make_unique<RandomCursor>(opCtx, _btree.get());
}
- virtual Status initAsEmpty(OperationContext* txn) {
- return _btree->initAsEmpty(txn);
+ virtual Status initAsEmpty(OperationContext* opCtx) {
+ return _btree->initAsEmpty(opCtx);
}
private:
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
index f6702fe27aa..de030d13cdc 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
@@ -80,22 +80,22 @@ std::once_flag assertValidFlag;
template <class BtreeLayout>
typename BtreeLogic<BtreeLayout>::Builder* BtreeLogic<BtreeLayout>::newBuilder(
- OperationContext* txn, bool dupsAllowed) {
- return new Builder(this, txn, dupsAllowed);
+ OperationContext* opCtx, bool dupsAllowed) {
+ return new Builder(this, opCtx, dupsAllowed);
}
template <class BtreeLayout>
BtreeLogic<BtreeLayout>::Builder::Builder(BtreeLogic* logic,
- OperationContext* txn,
+ OperationContext* opCtx,
bool dupsAllowed)
- : _logic(logic), _dupsAllowed(dupsAllowed), _txn(txn) {
+ : _logic(logic), _dupsAllowed(dupsAllowed), _opCtx(opCtx) {
// The normal bulk building path calls initAsEmpty, so we already have an empty root bucket.
// This isn't the case in some unit tests that use the Builder directly rather than going
// through an IndexAccessMethod.
- _rightLeafLoc = DiskLoc::fromRecordId(_logic->_headManager->getHead(txn));
+ _rightLeafLoc = DiskLoc::fromRecordId(_logic->_headManager->getHead(opCtx));
if (_rightLeafLoc.isNull()) {
- _rightLeafLoc = _logic->_addBucket(txn);
- _logic->_headManager->setHead(_txn, _rightLeafLoc.toRecordId());
+ _rightLeafLoc = _logic->_addBucket(opCtx);
+ _logic->_headManager->setHead(_opCtx, _rightLeafLoc.toRecordId());
}
// must be empty when starting
@@ -146,7 +146,7 @@ Status BtreeLogic<BtreeLayout>::Builder::addKey(const BSONObj& keyObj, const Dis
BucketType* rightLeaf = _getModifiableBucket(_rightLeafLoc);
if (!_logic->pushBack(rightLeaf, loc, *key, DiskLoc())) {
// bucket was full, so split and try with the new node.
- _txn->recoveryUnit()->registerChange(new SetRightLeafLocChange(this, _rightLeafLoc));
+ _opCtx->recoveryUnit()->registerChange(new SetRightLeafLocChange(this, _rightLeafLoc));
_rightLeafLoc = newBucket(rightLeaf, _rightLeafLoc);
rightLeaf = _getModifiableBucket(_rightLeafLoc);
invariant(_logic->pushBack(rightLeaf, loc, *key, DiskLoc()));
@@ -166,14 +166,14 @@ DiskLoc BtreeLogic<BtreeLayout>::Builder::newBucket(BucketType* leftSib, DiskLoc
if (leftSib->parent.isNull()) {
// Making a new root
- invariant(leftSibLoc.toRecordId() == _logic->_headManager->getHead(_txn));
- const DiskLoc newRootLoc = _logic->_addBucket(_txn);
+ invariant(leftSibLoc.toRecordId() == _logic->_headManager->getHead(_opCtx));
+ const DiskLoc newRootLoc = _logic->_addBucket(_opCtx);
leftSib->parent = newRootLoc;
- _logic->_headManager->setHead(_txn, newRootLoc.toRecordId());
+ _logic->_headManager->setHead(_opCtx, newRootLoc.toRecordId());
// Set the newRoot's nextChild to point to leftSib for the invariant below.
BucketType* newRoot = _getBucket(newRootLoc);
- *_txn->recoveryUnit()->writing(&newRoot->nextChild) = leftSibLoc;
+ *_opCtx->recoveryUnit()->writing(&newRoot->nextChild) = leftSibLoc;
}
DiskLoc parentLoc = leftSib->parent;
@@ -198,23 +198,23 @@ DiskLoc BtreeLogic<BtreeLayout>::Builder::newBucket(BucketType* leftSib, DiskLoc
// Create a new bucket to the right of leftSib and set its parent pointer and the downward
// nextChild pointer from the parent.
- DiskLoc newBucketLoc = _logic->_addBucket(_txn);
+ DiskLoc newBucketLoc = _logic->_addBucket(_opCtx);
BucketType* newBucket = _getBucket(newBucketLoc);
- *_txn->recoveryUnit()->writing(&newBucket->parent) = parentLoc;
- *_txn->recoveryUnit()->writing(&parent->nextChild) = newBucketLoc;
+ *_opCtx->recoveryUnit()->writing(&newBucket->parent) = parentLoc;
+ *_opCtx->recoveryUnit()->writing(&parent->nextChild) = newBucketLoc;
return newBucketLoc;
}
template <class BtreeLayout>
typename BtreeLogic<BtreeLayout>::BucketType*
BtreeLogic<BtreeLayout>::Builder::_getModifiableBucket(DiskLoc loc) {
- return _logic->btreemod(_txn, _logic->getBucket(_txn, loc));
+ return _logic->btreemod(_opCtx, _logic->getBucket(_opCtx, loc));
}
template <class BtreeLayout>
typename BtreeLogic<BtreeLayout>::BucketType* BtreeLogic<BtreeLayout>::Builder::_getBucket(
DiskLoc loc) {
- return _logic->getBucket(_txn, loc);
+ return _logic->getBucket(_opCtx, loc);
}
//
@@ -261,8 +261,8 @@ char* BtreeLogic<BtreeLayout>::dataAt(BucketType* bucket, short ofs) {
template <class BtreeLayout>
typename BtreeLogic<BtreeLayout>::BucketType* BtreeLogic<BtreeLayout>::btreemod(
- OperationContext* txn, BucketType* bucket) {
- txn->recoveryUnit()->writingPtr(bucket, BtreeLayout::BucketSize);
+ OperationContext* opCtx, BucketType* bucket) {
+ opCtx->recoveryUnit()->writingPtr(bucket, BtreeLayout::BucketSize);
return bucket;
}
@@ -433,7 +433,7 @@ bool BtreeLogic<BtreeLayout>::pushBack(BucketType* bucket,
* Returns false if a split is required.
*/
template <class BtreeLayout>
-bool BtreeLogic<BtreeLayout>::basicInsert(OperationContext* txn,
+bool BtreeLogic<BtreeLayout>::basicInsert(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int& keypos,
@@ -444,13 +444,13 @@ bool BtreeLogic<BtreeLayout>::basicInsert(OperationContext* txn,
int bytesNeeded = key.dataSize() + sizeof(KeyHeaderType);
if (bytesNeeded > bucket->emptySize) {
- _pack(txn, bucket, bucketLoc, keypos);
+ _pack(opCtx, bucket, bucketLoc, keypos);
if (bytesNeeded > bucket->emptySize) {
return false;
}
}
- invariant(getBucket(txn, bucketLoc) == bucket);
+ invariant(getBucket(opCtx, bucketLoc) == bucket);
{
// declare that we will write to [k(keypos),k(n)]
@@ -458,7 +458,7 @@ bool BtreeLogic<BtreeLayout>::basicInsert(OperationContext* txn,
char* end = reinterpret_cast<char*>(&getKeyHeader(bucket, bucket->n + 1));
// Declare that we will write to [k(keypos),k(n)]
- txn->recoveryUnit()->writingPtr(start, end - start);
+ opCtx->recoveryUnit()->writingPtr(start, end - start);
}
// e.g. for n==3, keypos==2
@@ -468,7 +468,7 @@ bool BtreeLogic<BtreeLayout>::basicInsert(OperationContext* txn,
}
size_t writeLen = sizeof(bucket->emptySize) + sizeof(bucket->topSize) + sizeof(bucket->n);
- txn->recoveryUnit()->writingPtr(&bucket->emptySize, writeLen);
+ opCtx->recoveryUnit()->writingPtr(&bucket->emptySize, writeLen);
bucket->emptySize -= sizeof(KeyHeaderType);
bucket->n++;
@@ -478,7 +478,7 @@ bool BtreeLogic<BtreeLayout>::basicInsert(OperationContext* txn,
kn.recordLoc = recordLoc;
kn.setKeyDataOfs((short)_alloc(bucket, key.dataSize()));
char* p = dataAt(bucket, kn.keyDataOfs());
- txn->recoveryUnit()->writingPtr(p, key.dataSize());
+ opCtx->recoveryUnit()->writingPtr(p, key.dataSize());
memcpy(p, key.data(), key.dataSize());
return true;
}
@@ -515,17 +515,17 @@ int BtreeLogic<BtreeLayout>::_packedDataSize(BucketType* bucket, int refPos) {
* it.
*/
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::_pack(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::_pack(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc thisLoc,
int& refPos) {
- invariant(getBucket(txn, thisLoc) == bucket);
+ invariant(getBucket(opCtx, thisLoc) == bucket);
if (bucket->flags & Packed) {
return;
}
- _packReadyForMod(btreemod(txn, bucket), refPos);
+ _packReadyForMod(btreemod(opCtx, bucket), refPos);
}
/**
@@ -669,44 +669,44 @@ void BtreeLogic<BtreeLayout>::dropFront(BucketType* bucket, int nDrop, int& refp
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::customLocate(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::customLocate(OperationContext* opCtx,
DiskLoc* locInOut,
int* keyOfsInOut,
const IndexSeekPoint& seekPoint,
int direction) const {
pair<DiskLoc, int> unused;
- customLocate(txn, locInOut, keyOfsInOut, seekPoint, direction, unused);
- skipUnusedKeys(txn, locInOut, keyOfsInOut, direction);
+ customLocate(opCtx, locInOut, keyOfsInOut, seekPoint, direction, unused);
+ skipUnusedKeys(opCtx, locInOut, keyOfsInOut, direction);
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::advance(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::advance(OperationContext* opCtx,
DiskLoc* bucketLocInOut,
int* posInOut,
int direction) const {
- *bucketLocInOut = advance(txn, *bucketLocInOut, posInOut, direction);
- skipUnusedKeys(txn, bucketLocInOut, posInOut, direction);
+ *bucketLocInOut = advance(opCtx, *bucketLocInOut, posInOut, direction);
+ skipUnusedKeys(opCtx, bucketLocInOut, posInOut, direction);
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::skipUnusedKeys(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::skipUnusedKeys(OperationContext* opCtx,
DiskLoc* loc,
int* pos,
int direction) const {
- while (!loc->isNull() && !keyIsUsed(txn, *loc, *pos)) {
- *loc = advance(txn, *loc, pos, direction);
+ while (!loc->isNull() && !keyIsUsed(opCtx, *loc, *pos)) {
+ *loc = advance(opCtx, *loc, pos, direction);
}
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::advanceTo(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::advanceTo(OperationContext* opCtx,
DiskLoc* thisLocInOut,
int* keyOfsInOut,
const IndexSeekPoint& seekPoint,
int direction) const {
- advanceToImpl(txn, thisLocInOut, keyOfsInOut, seekPoint, direction);
- skipUnusedKeys(txn, thisLocInOut, keyOfsInOut, direction);
+ advanceToImpl(opCtx, thisLocInOut, keyOfsInOut, seekPoint, direction);
+ skipUnusedKeys(opCtx, thisLocInOut, keyOfsInOut, direction);
}
/**
@@ -719,12 +719,12 @@ void BtreeLogic<BtreeLayout>::advanceTo(OperationContext* txn,
* and reverse implementations would be more efficient
*/
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::advanceToImpl(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::advanceToImpl(OperationContext* opCtx,
DiskLoc* thisLocInOut,
int* keyOfsInOut,
const IndexSeekPoint& seekPoint,
int direction) const {
- BucketType* bucket = getBucket(txn, *thisLocInOut);
+ BucketType* bucket = getBucket(opCtx, *thisLocInOut);
int l, h;
bool dontGoUp;
@@ -745,14 +745,14 @@ void BtreeLogic<BtreeLayout>::advanceToImpl(OperationContext* txn,
if (dontGoUp) {
// this comparison result assures h > l
- if (!customFind(txn, l, h, seekPoint, direction, thisLocInOut, keyOfsInOut, bestParent)) {
+ if (!customFind(opCtx, l, h, seekPoint, direction, thisLocInOut, keyOfsInOut, bestParent)) {
return;
}
} else {
// go up parents until rightmost/leftmost node is >=/<= target or at top
while (!bucket->parent.isNull()) {
*thisLocInOut = bucket->parent;
- bucket = getBucket(txn, *thisLocInOut);
+ bucket = getBucket(opCtx, *thisLocInOut);
if (direction > 0) {
if (customBSONCmp(getFullKey(bucket, bucket->n - 1).data.toBson(),
@@ -768,17 +768,17 @@ void BtreeLogic<BtreeLayout>::advanceToImpl(OperationContext* txn,
}
}
- customLocate(txn, thisLocInOut, keyOfsInOut, seekPoint, direction, bestParent);
+ customLocate(opCtx, thisLocInOut, keyOfsInOut, seekPoint, direction, bestParent);
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::customLocate(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::customLocate(OperationContext* opCtx,
DiskLoc* locInOut,
int* keyOfsInOut,
const IndexSeekPoint& seekPoint,
int direction,
pair<DiskLoc, int>& bestParent) const {
- BucketType* bucket = getBucket(txn, *locInOut);
+ BucketType* bucket = getBucket(opCtx, *locInOut);
if (0 == bucket->n) {
*locInOut = DiskLoc();
@@ -809,7 +809,7 @@ void BtreeLogic<BtreeLayout>::customLocate(OperationContext* txn,
if (!next.isNull()) {
bestParent = pair<DiskLoc, int>(*locInOut, *keyOfsInOut);
*locInOut = next;
- bucket = getBucket(txn, *locInOut);
+ bucket = getBucket(opCtx, *locInOut);
continue;
} else {
return;
@@ -832,21 +832,21 @@ void BtreeLogic<BtreeLayout>::customLocate(OperationContext* txn,
return;
} else {
*locInOut = next;
- bucket = getBucket(txn, *locInOut);
+ bucket = getBucket(opCtx, *locInOut);
continue;
}
}
- if (!customFind(txn, l, h, seekPoint, direction, locInOut, keyOfsInOut, bestParent)) {
+ if (!customFind(opCtx, l, h, seekPoint, direction, locInOut, keyOfsInOut, bestParent)) {
return;
}
- bucket = getBucket(txn, *locInOut);
+ bucket = getBucket(opCtx, *locInOut);
}
}
template <class BtreeLayout>
-bool BtreeLogic<BtreeLayout>::customFind(OperationContext* txn,
+bool BtreeLogic<BtreeLayout>::customFind(OperationContext* opCtx,
int low,
int high,
const IndexSeekPoint& seekPoint,
@@ -854,7 +854,7 @@ bool BtreeLogic<BtreeLayout>::customFind(OperationContext* txn,
DiskLoc* thisLocInOut,
int* keyOfsInOut,
pair<DiskLoc, int>& bestParent) const {
- const BucketType* bucket = getBucket(txn, *thisLocInOut);
+ const BucketType* bucket = getBucket(opCtx, *thisLocInOut);
for (;;) {
if (low + 1 == high) {
@@ -942,31 +942,31 @@ int BtreeLogic<BtreeLayout>::customBSONCmp(const BSONObj& left,
}
template <class BtreeLayout>
-bool BtreeLogic<BtreeLayout>::exists(OperationContext* txn, const KeyDataType& key) const {
+bool BtreeLogic<BtreeLayout>::exists(OperationContext* opCtx, const KeyDataType& key) const {
int position = 0;
// Find the DiskLoc
bool found;
- DiskLoc bucket = _locate(txn, getRootLoc(txn), key, &position, &found, DiskLoc::min(), 1);
+ DiskLoc bucket = _locate(opCtx, getRootLoc(opCtx), key, &position, &found, DiskLoc::min(), 1);
while (!bucket.isNull()) {
- FullKey fullKey = getFullKey(getBucket(txn, bucket), position);
+ FullKey fullKey = getFullKey(getBucket(opCtx, bucket), position);
if (fullKey.header.isUsed()) {
return fullKey.data.woEqual(key);
}
- bucket = advance(txn, bucket, &position, 1);
+ bucket = advance(opCtx, bucket, &position, 1);
}
return false;
}
template <class BtreeLayout>
-Status BtreeLogic<BtreeLayout>::dupKeyCheck(OperationContext* txn,
+Status BtreeLogic<BtreeLayout>::dupKeyCheck(OperationContext* opCtx,
const BSONObj& key,
const DiskLoc& loc) const {
KeyDataOwnedType theKey(key);
- if (!wouldCreateDup(txn, theKey, loc)) {
+ if (!wouldCreateDup(opCtx, theKey, loc)) {
return Status::OK();
}
@@ -974,16 +974,16 @@ Status BtreeLogic<BtreeLayout>::dupKeyCheck(OperationContext* txn,
}
template <class BtreeLayout>
-bool BtreeLogic<BtreeLayout>::wouldCreateDup(OperationContext* txn,
+bool BtreeLogic<BtreeLayout>::wouldCreateDup(OperationContext* opCtx,
const KeyDataType& key,
const DiskLoc self) const {
int position;
bool found;
- DiskLoc posLoc = _locate(txn, getRootLoc(txn), key, &position, &found, DiskLoc::min(), 1);
+ DiskLoc posLoc = _locate(opCtx, getRootLoc(opCtx), key, &position, &found, DiskLoc::min(), 1);
while (!posLoc.isNull()) {
- FullKey fullKey = getFullKey(getBucket(txn, posLoc), position);
+ FullKey fullKey = getFullKey(getBucket(opCtx, posLoc), position);
if (fullKey.header.isUsed()) {
// TODO: we may not need fullKey.data until we know fullKey.header.isUsed() here
// and elsewhere.
@@ -993,7 +993,7 @@ bool BtreeLogic<BtreeLayout>::wouldCreateDup(OperationContext* txn,
break;
}
- posLoc = advance(txn, posLoc, &position, 1);
+ posLoc = advance(opCtx, posLoc, &position, 1);
}
return false;
}
@@ -1022,7 +1022,7 @@ string BtreeLogic<BtreeLayout>::dupKeyError(const KeyDataType& key) const {
* note result might be an Unused location!
*/
template <class BtreeLayout>
-Status BtreeLogic<BtreeLayout>::_find(OperationContext* txn,
+Status BtreeLogic<BtreeLayout>::_find(OperationContext* opCtx,
BucketType* bucket,
const KeyDataType& key,
const DiskLoc& recordLoc,
@@ -1056,8 +1056,8 @@ Status BtreeLogic<BtreeLayout>::_find(OperationContext* txn,
// This is expensive and we only want to do it once(? -- when would
// it happen twice).
dupsCheckedYet = true;
- if (exists(txn, key)) {
- if (wouldCreateDup(txn, key, genericRecordLoc)) {
+ if (exists(opCtx, key)) {
+ if (wouldCreateDup(opCtx, key, genericRecordLoc)) {
return Status(ErrorCodes::DuplicateKey, dupKeyError(key), 11000);
} else {
return Status(ErrorCodes::DuplicateKeyValue,
@@ -1123,54 +1123,54 @@ Status BtreeLogic<BtreeLayout>::_find(OperationContext* txn,
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::delBucket(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::delBucket(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc) {
- invariant(bucketLoc != getRootLoc(txn));
+ invariant(bucketLoc != getRootLoc(opCtx));
_cursorRegistry->invalidateCursorsForBucket(bucketLoc);
- BucketType* p = getBucket(txn, bucket->parent);
- int parentIdx = indexInParent(txn, bucket, bucketLoc);
- *txn->recoveryUnit()->writing(&childLocForPos(p, parentIdx)) = DiskLoc();
- deallocBucket(txn, bucket, bucketLoc);
+ BucketType* p = getBucket(opCtx, bucket->parent);
+ int parentIdx = indexInParent(opCtx, bucket, bucketLoc);
+ *opCtx->recoveryUnit()->writing(&childLocForPos(p, parentIdx)) = DiskLoc();
+ deallocBucket(opCtx, bucket, bucketLoc);
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::deallocBucket(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::deallocBucket(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc) {
bucket->n = BtreeLayout::INVALID_N_SENTINEL;
bucket->parent.Null();
- _recordStore->deleteRecord(txn, bucketLoc.toRecordId());
+ _recordStore->deleteRecord(opCtx, bucketLoc.toRecordId());
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::restorePosition(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::restorePosition(OperationContext* opCtx,
const BSONObj& savedKey,
const DiskLoc& savedLoc,
int direction,
DiskLoc* bucketLocInOut,
int* keyOffsetInOut) const {
// The caller has to ensure validity of the saved cursor using the SavedCursorRegistry
- BucketType* bucket = getBucket(txn, *bucketLocInOut);
+ BucketType* bucket = getBucket(opCtx, *bucketLocInOut);
invariant(bucket);
invariant(BtreeLayout::INVALID_N_SENTINEL != bucket->n);
if (_keyIsAt(savedKey, savedLoc, bucket, *keyOffsetInOut)) {
- skipUnusedKeys(txn, bucketLocInOut, keyOffsetInOut, direction);
+ skipUnusedKeys(opCtx, bucketLocInOut, keyOffsetInOut, direction);
return;
}
if (*keyOffsetInOut > 0) {
(*keyOffsetInOut)--;
if (_keyIsAt(savedKey, savedLoc, bucket, *keyOffsetInOut)) {
- skipUnusedKeys(txn, bucketLocInOut, keyOffsetInOut, direction);
+ skipUnusedKeys(opCtx, bucketLocInOut, keyOffsetInOut, direction);
return;
}
}
- locate(txn, savedKey, savedLoc, direction, keyOffsetInOut, bucketLocInOut);
+ locate(opCtx, savedKey, savedLoc, direction, keyOffsetInOut, bucketLocInOut);
}
template <class BtreeLayout>
@@ -1193,7 +1193,7 @@ bool BtreeLogic<BtreeLayout>::_keyIsAt(const BSONObj& savedKey,
* May delete the bucket 'bucket' rendering 'bucketLoc' invalid.
*/
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::delKeyAtPos(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::delKeyAtPos(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int p) {
@@ -1205,24 +1205,24 @@ void BtreeLogic<BtreeLayout>::delKeyAtPos(OperationContext* txn,
if (isHead(bucket)) {
// we don't delete the top bucket ever
} else {
- if (!mayBalanceWithNeighbors(txn, bucket, bucketLoc)) {
+ if (!mayBalanceWithNeighbors(opCtx, bucket, bucketLoc)) {
// An empty bucket is only allowed as a txnient state. If
// there are no neighbors to balance with, we delete ourself.
// This condition is only expected in legacy btrees.
- delBucket(txn, bucket, bucketLoc);
+ delBucket(opCtx, bucket, bucketLoc);
}
}
return;
}
- deleteInternalKey(txn, bucket, bucketLoc, p);
+ deleteInternalKey(opCtx, bucket, bucketLoc, p);
return;
}
if (left.isNull()) {
_delKeyAtPos(bucket, p);
- mayBalanceWithNeighbors(txn, bucket, bucketLoc);
+ mayBalanceWithNeighbors(opCtx, bucket, bucketLoc);
} else {
- deleteInternalKey(txn, bucket, bucketLoc, p);
+ deleteInternalKey(opCtx, bucket, bucketLoc, p);
}
}
@@ -1250,7 +1250,7 @@ void BtreeLogic<BtreeLayout>::delKeyAtPos(OperationContext* txn,
* legacy btree.
*/
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::deleteInternalKey(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::deleteInternalKey(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int keypos) {
@@ -1259,11 +1259,11 @@ void BtreeLogic<BtreeLayout>::deleteInternalKey(OperationContext* txn,
invariant(!lchild.isNull() || !rchild.isNull());
int advanceDirection = lchild.isNull() ? 1 : -1;
int advanceKeyOfs = keypos;
- DiskLoc advanceLoc = advance(txn, bucketLoc, &advanceKeyOfs, advanceDirection);
+ DiskLoc advanceLoc = advance(opCtx, bucketLoc, &advanceKeyOfs, advanceDirection);
// advanceLoc must be a descentant of thisLoc, because thisLoc has a
// child in the proper direction and all descendants of thisLoc must be
// nonempty because they are not the root.
- BucketType* advanceBucket = getBucket(txn, advanceLoc);
+ BucketType* advanceBucket = getBucket(opCtx, advanceLoc);
if (!childLocForPos(advanceBucket, advanceKeyOfs).isNull() ||
!childLocForPos(advanceBucket, advanceKeyOfs + 1).isNull()) {
@@ -1275,7 +1275,7 @@ void BtreeLogic<BtreeLayout>::deleteInternalKey(OperationContext* txn,
// Because advanceLoc is a descendant of thisLoc, updating thisLoc will
// not affect packing or keys of advanceLoc and kn will be stable
// during the following setInternalKey()
- setInternalKey(txn,
+ setInternalKey(opCtx,
bucket,
bucketLoc,
keypos,
@@ -1283,31 +1283,31 @@ void BtreeLogic<BtreeLayout>::deleteInternalKey(OperationContext* txn,
kn.data,
childLocForPos(bucket, keypos),
childLocForPos(bucket, keypos + 1));
- delKeyAtPos(txn, btreemod(txn, advanceBucket), advanceLoc, advanceKeyOfs);
+ delKeyAtPos(opCtx, btreemod(opCtx, advanceBucket), advanceLoc, advanceKeyOfs);
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::replaceWithNextChild(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::replaceWithNextChild(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc) {
invariant(bucket->n == 0 && !bucket->nextChild.isNull());
if (bucket->parent.isNull()) {
- invariant(getRootLoc(txn) == bucketLoc);
- _headManager->setHead(txn, bucket->nextChild.toRecordId());
+ invariant(getRootLoc(opCtx) == bucketLoc);
+ _headManager->setHead(opCtx, bucket->nextChild.toRecordId());
} else {
- BucketType* parentBucket = getBucket(txn, bucket->parent);
- int bucketIndexInParent = indexInParent(txn, bucket, bucketLoc);
- *txn->recoveryUnit()->writing(&childLocForPos(parentBucket, bucketIndexInParent)) =
+ BucketType* parentBucket = getBucket(opCtx, bucket->parent);
+ int bucketIndexInParent = indexInParent(opCtx, bucket, bucketLoc);
+ *opCtx->recoveryUnit()->writing(&childLocForPos(parentBucket, bucketIndexInParent)) =
bucket->nextChild;
}
- *txn->recoveryUnit()->writing(&getBucket(txn, bucket->nextChild)->parent) = bucket->parent;
+ *opCtx->recoveryUnit()->writing(&getBucket(opCtx, bucket->nextChild)->parent) = bucket->parent;
_cursorRegistry->invalidateCursorsForBucket(bucketLoc);
- deallocBucket(txn, bucket, bucketLoc);
+ deallocBucket(opCtx, bucket, bucketLoc);
}
template <class BtreeLayout>
-bool BtreeLogic<BtreeLayout>::canMergeChildren(OperationContext* txn,
+bool BtreeLogic<BtreeLayout>::canMergeChildren(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
const int leftIndex) {
@@ -1322,8 +1322,8 @@ bool BtreeLogic<BtreeLayout>::canMergeChildren(OperationContext* txn,
int pos = 0;
- BucketType* leftBucket = getBucket(txn, leftNodeLoc);
- BucketType* rightBucket = getBucket(txn, rightNodeLoc);
+ BucketType* leftBucket = getBucket(opCtx, leftNodeLoc);
+ BucketType* rightBucket = getBucket(opCtx, rightNodeLoc);
int sum = BucketType::HeaderSize + _packedDataSize(leftBucket, pos) +
_packedDataSize(rightBucket, pos) + getFullKey(bucket, leftIndex).data.dataSize() +
@@ -1337,14 +1337,14 @@ bool BtreeLogic<BtreeLayout>::canMergeChildren(OperationContext* txn,
* splitPos().
*/
template <class BtreeLayout>
-int BtreeLogic<BtreeLayout>::_rebalancedSeparatorPos(OperationContext* txn,
+int BtreeLogic<BtreeLayout>::_rebalancedSeparatorPos(OperationContext* opCtx,
BucketType* bucket,
int leftIndex) {
int split = -1;
int rightSize = 0;
- const BucketType* l = childForPos(txn, bucket, leftIndex);
- const BucketType* r = childForPos(txn, bucket, leftIndex + 1);
+ const BucketType* l = childForPos(opCtx, bucket, leftIndex);
+ const BucketType* r = childForPos(opCtx, bucket, leftIndex + 1);
int KNS = sizeof(KeyHeaderType);
int rightSizeLimit = (l->topSize + l->n * KNS + getFullKey(bucket, leftIndex).data.dataSize() +
@@ -1391,15 +1391,15 @@ int BtreeLogic<BtreeLayout>::_rebalancedSeparatorPos(OperationContext* txn,
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::doMergeChildren(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::doMergeChildren(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int leftIndex) {
DiskLoc leftNodeLoc = childLocForPos(bucket, leftIndex);
DiskLoc rightNodeLoc = childLocForPos(bucket, leftIndex + 1);
- BucketType* l = btreemod(txn, getBucket(txn, leftNodeLoc));
- BucketType* r = btreemod(txn, getBucket(txn, rightNodeLoc));
+ BucketType* l = btreemod(opCtx, getBucket(opCtx, leftNodeLoc));
+ BucketType* r = btreemod(opCtx, getBucket(opCtx, rightNodeLoc));
int pos = 0;
_packReadyForMod(l, pos);
@@ -1417,8 +1417,8 @@ void BtreeLogic<BtreeLayout>::doMergeChildren(OperationContext* txn,
}
l->nextChild = r->nextChild;
- fixParentPtrs(txn, l, leftNodeLoc, oldLNum);
- delBucket(txn, r, rightNodeLoc);
+ fixParentPtrs(opCtx, l, leftNodeLoc, oldLNum);
+ delBucket(opCtx, r, rightNodeLoc);
childLocForPos(bucket, leftIndex + 1) = leftNodeLoc;
childLocForPos(bucket, leftIndex) = DiskLoc();
@@ -1429,18 +1429,18 @@ void BtreeLogic<BtreeLayout>::doMergeChildren(OperationContext* txn,
//
// TODO To ensure all leaves are of equal height, we should ensure this is only called
// on the root.
- replaceWithNextChild(txn, bucket, bucketLoc);
+ replaceWithNextChild(opCtx, bucket, bucketLoc);
} else {
- mayBalanceWithNeighbors(txn, bucket, bucketLoc);
+ mayBalanceWithNeighbors(opCtx, bucket, bucketLoc);
}
}
template <class BtreeLayout>
-int BtreeLogic<BtreeLayout>::indexInParent(OperationContext* txn,
+int BtreeLogic<BtreeLayout>::indexInParent(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc) const {
invariant(!bucket->parent.isNull());
- const BucketType* p = getBucket(txn, bucket->parent);
+ const BucketType* p = getBucket(opCtx, bucket->parent);
if (p->nextChild == bucketLoc) {
return p->n;
}
@@ -1461,22 +1461,22 @@ int BtreeLogic<BtreeLayout>::indexInParent(OperationContext* txn,
}
template <class BtreeLayout>
-bool BtreeLogic<BtreeLayout>::tryBalanceChildren(OperationContext* txn,
+bool BtreeLogic<BtreeLayout>::tryBalanceChildren(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int leftIndex) {
// If we can merge, then we must merge rather than balance to preserve bucket utilization
// constraints.
- if (canMergeChildren(txn, bucket, bucketLoc, leftIndex)) {
+ if (canMergeChildren(opCtx, bucket, bucketLoc, leftIndex)) {
return false;
}
- doBalanceChildren(txn, btreemod(txn, bucket), bucketLoc, leftIndex);
+ doBalanceChildren(opCtx, btreemod(opCtx, bucket), bucketLoc, leftIndex);
return true;
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::doBalanceLeftToRight(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::doBalanceLeftToRight(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int leftIndex,
@@ -1500,14 +1500,14 @@ void BtreeLogic<BtreeLayout>::doBalanceLeftToRight(OperationContext* txn,
FullKey leftIndexKN = getFullKey(bucket, leftIndex);
setKey(r, rAdd - 1, leftIndexKN.recordLoc, leftIndexKN.data, l->nextChild);
- fixParentPtrs(txn, r, rchild, 0, rAdd - 1);
+ fixParentPtrs(opCtx, r, rchild, 0, rAdd - 1);
FullKey kn = getFullKey(l, split);
l->nextChild = kn.prevChildBucket;
// Because lchild is a descendant of thisLoc, updating thisLoc will not affect packing or
// keys of lchild and kn will be stable during the following setInternalKey()
- setInternalKey(txn, bucket, bucketLoc, leftIndex, kn.recordLoc, kn.data, lchild, rchild);
+ setInternalKey(opCtx, bucket, bucketLoc, leftIndex, kn.recordLoc, kn.data, lchild, rchild);
// lchild and rchild cannot be merged, so there must be >0 (actually more) keys to the left
// of split.
@@ -1516,7 +1516,7 @@ void BtreeLogic<BtreeLayout>::doBalanceLeftToRight(OperationContext* txn,
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::doBalanceRightToLeft(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::doBalanceRightToLeft(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int leftIndex,
@@ -1546,11 +1546,11 @@ void BtreeLogic<BtreeLayout>::doBalanceRightToLeft(OperationContext* txn,
FullKey kn = getFullKey(r, split - lN - 1);
l->nextChild = kn.prevChildBucket;
// Child lN was lchild's old nextChild, and don't need to fix that one.
- fixParentPtrs(txn, l, lchild, lN + 1, l->n);
+ fixParentPtrs(opCtx, l, lchild, lN + 1, l->n);
// Because rchild is a descendant of thisLoc, updating thisLoc will
// not affect packing or keys of rchild and kn will be stable
// during the following setInternalKey()
- setInternalKey(txn, bucket, bucketLoc, leftIndex, kn.recordLoc, kn.data, lchild, rchild);
+ setInternalKey(opCtx, bucket, bucketLoc, leftIndex, kn.recordLoc, kn.data, lchild, rchild);
}
// lchild and rchild cannot be merged, so there must be >0 (actually more)
@@ -1560,7 +1560,7 @@ void BtreeLogic<BtreeLayout>::doBalanceRightToLeft(OperationContext* txn,
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::doBalanceChildren(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::doBalanceChildren(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int leftIndex) {
@@ -1568,26 +1568,26 @@ void BtreeLogic<BtreeLayout>::doBalanceChildren(OperationContext* txn,
DiskLoc rchild = childLocForPos(bucket, leftIndex + 1);
int zeropos = 0;
- BucketType* l = btreemod(txn, getBucket(txn, lchild));
+ BucketType* l = btreemod(opCtx, getBucket(opCtx, lchild));
_packReadyForMod(l, zeropos);
- BucketType* r = btreemod(txn, getBucket(txn, rchild));
+ BucketType* r = btreemod(opCtx, getBucket(opCtx, rchild));
_packReadyForMod(r, zeropos);
- int split = _rebalancedSeparatorPos(txn, bucket, leftIndex);
+ int split = _rebalancedSeparatorPos(opCtx, bucket, leftIndex);
// By definition, if we are below the low water mark and cannot merge
// then we must actively balance.
invariant(split != l->n);
if (split < l->n) {
- doBalanceLeftToRight(txn, bucket, bucketLoc, leftIndex, split, l, lchild, r, rchild);
+ doBalanceLeftToRight(opCtx, bucket, bucketLoc, leftIndex, split, l, lchild, r, rchild);
} else {
- doBalanceRightToLeft(txn, bucket, bucketLoc, leftIndex, split, l, lchild, r, rchild);
+ doBalanceRightToLeft(opCtx, bucket, bucketLoc, leftIndex, split, l, lchild, r, rchild);
}
}
template <class BtreeLayout>
-bool BtreeLogic<BtreeLayout>::mayBalanceWithNeighbors(OperationContext* txn,
+bool BtreeLogic<BtreeLayout>::mayBalanceWithNeighbors(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc) {
if (bucket->parent.isNull()) {
@@ -1598,8 +1598,8 @@ bool BtreeLogic<BtreeLayout>::mayBalanceWithNeighbors(OperationContext* txn,
return false;
}
- BucketType* p = getBucket(txn, bucket->parent);
- int parentIdx = indexInParent(txn, bucket, bucketLoc);
+ BucketType* p = getBucket(opCtx, bucket->parent);
+ int parentIdx = indexInParent(opCtx, bucket, bucketLoc);
// TODO will missing neighbor case be possible long term? Should we try to merge/balance
// somehow in that case if so?
@@ -1609,20 +1609,20 @@ bool BtreeLogic<BtreeLayout>::mayBalanceWithNeighbors(OperationContext* txn,
// Balance if possible on one side - we merge only if absolutely necessary to preserve btree
// bucket utilization constraints since that's a more heavy duty operation (especially if we
// must re-split later).
- if (mayBalanceRight && tryBalanceChildren(txn, p, bucket->parent, parentIdx)) {
+ if (mayBalanceRight && tryBalanceChildren(opCtx, p, bucket->parent, parentIdx)) {
return true;
}
- if (mayBalanceLeft && tryBalanceChildren(txn, p, bucket->parent, parentIdx - 1)) {
+ if (mayBalanceLeft && tryBalanceChildren(opCtx, p, bucket->parent, parentIdx - 1)) {
return true;
}
- BucketType* pm = btreemod(txn, getBucket(txn, bucket->parent));
+ BucketType* pm = btreemod(opCtx, getBucket(opCtx, bucket->parent));
if (mayBalanceRight) {
- doMergeChildren(txn, pm, bucket->parent, parentIdx);
+ doMergeChildren(opCtx, pm, bucket->parent, parentIdx);
return true;
} else if (mayBalanceLeft) {
- doMergeChildren(txn, pm, bucket->parent, parentIdx - 1);
+ doMergeChildren(opCtx, pm, bucket->parent, parentIdx - 1);
return true;
}
@@ -1630,25 +1630,25 @@ bool BtreeLogic<BtreeLayout>::mayBalanceWithNeighbors(OperationContext* txn,
}
template <class BtreeLayout>
-bool BtreeLogic<BtreeLayout>::unindex(OperationContext* txn,
+bool BtreeLogic<BtreeLayout>::unindex(OperationContext* opCtx,
const BSONObj& key,
const DiskLoc& recordLoc) {
int pos;
bool found = false;
KeyDataOwnedType ownedKey(key);
- DiskLoc loc = _locate(txn, getRootLoc(txn), ownedKey, &pos, &found, recordLoc, 1);
+ DiskLoc loc = _locate(opCtx, getRootLoc(opCtx), ownedKey, &pos, &found, recordLoc, 1);
if (found) {
- BucketType* bucket = btreemod(txn, getBucket(txn, loc));
- delKeyAtPos(txn, bucket, loc, pos);
- assertValid(_indexName, getRoot(txn), _ordering);
+ BucketType* bucket = btreemod(opCtx, getBucket(opCtx, loc));
+ delKeyAtPos(opCtx, bucket, loc, pos);
+ assertValid(_indexName, getRoot(opCtx), _ordering);
}
return found;
}
template <class BtreeLayout>
-bool BtreeLogic<BtreeLayout>::isEmpty(OperationContext* txn) const {
- return getRoot(txn)->n == 0;
+bool BtreeLogic<BtreeLayout>::isEmpty(OperationContext* opCtx) const {
+ return getRoot(opCtx)->n == 0;
}
/**
@@ -1656,12 +1656,12 @@ bool BtreeLogic<BtreeLayout>::isEmpty(OperationContext* txn) const {
* Maybe get rid of parent ptrs?
*/
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::fixParentPtrs(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::fixParentPtrs(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int firstIndex,
int lastIndex) {
- invariant(getBucket(txn, bucketLoc) == bucket);
+ invariant(getBucket(opCtx, bucketLoc) == bucket);
if (lastIndex == -1) {
lastIndex = bucket->n;
@@ -1670,13 +1670,13 @@ void BtreeLogic<BtreeLayout>::fixParentPtrs(OperationContext* txn,
for (int i = firstIndex; i <= lastIndex; i++) {
const DiskLoc childLoc = childLocForPos(bucket, i);
if (!childLoc.isNull()) {
- *txn->recoveryUnit()->writing(&getBucket(txn, childLoc)->parent) = bucketLoc;
+ *opCtx->recoveryUnit()->writing(&getBucket(opCtx, childLoc)->parent) = bucketLoc;
}
}
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::setInternalKey(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::setInternalKey(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int keypos,
@@ -1696,7 +1696,7 @@ void BtreeLogic<BtreeLayout>::setInternalKey(OperationContext* txn,
// Just set temporarily - required to pass validation in insertHere()
childLocForPos(bucket, keypos) = lchild;
- insertHere(txn, bucketLoc, keypos, key, recordLoc, lchild, rchild);
+ insertHere(opCtx, bucketLoc, keypos, key, recordLoc, lchild, rchild);
}
/**
@@ -1710,19 +1710,19 @@ void BtreeLogic<BtreeLayout>::setInternalKey(OperationContext* txn,
* intent code in basicInsert().
*/
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::insertHere(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::insertHere(OperationContext* opCtx,
const DiskLoc bucketLoc,
int pos,
const KeyDataType& key,
const DiskLoc recordLoc,
const DiskLoc leftChildLoc,
const DiskLoc rightChildLoc) {
- BucketType* bucket = getBucket(txn, bucketLoc);
+ BucketType* bucket = getBucket(opCtx, bucketLoc);
- if (!basicInsert(txn, bucket, bucketLoc, pos, key, recordLoc)) {
+ if (!basicInsert(opCtx, bucket, bucketLoc, pos, key, recordLoc)) {
// If basicInsert() fails, the bucket will be packed as required by split().
- split(txn,
- btreemod(txn, bucket),
+ split(opCtx,
+ btreemod(opCtx, bucket),
bucketLoc,
pos,
recordLoc,
@@ -1741,9 +1741,9 @@ void BtreeLogic<BtreeLayout>::insertHere(OperationContext* txn,
}
kn->prevChildBucket = bucket->nextChild;
invariant(kn->prevChildBucket == leftChildLoc);
- *txn->recoveryUnit()->writing(&bucket->nextChild) = rightChildLoc;
+ *opCtx->recoveryUnit()->writing(&bucket->nextChild) = rightChildLoc;
if (!rightChildLoc.isNull()) {
- *txn->recoveryUnit()->writing(&getBucket(txn, rightChildLoc)->parent) = bucketLoc;
+ *opCtx->recoveryUnit()->writing(&getBucket(opCtx, rightChildLoc)->parent) = bucketLoc;
}
} else {
kn->prevChildBucket = leftChildLoc;
@@ -1755,13 +1755,13 @@ void BtreeLogic<BtreeLayout>::insertHere(OperationContext* txn,
// Intent declared in basicInsert()
*const_cast<LocType*>(pc) = rightChildLoc;
if (!rightChildLoc.isNull()) {
- *txn->recoveryUnit()->writing(&getBucket(txn, rightChildLoc)->parent) = bucketLoc;
+ *opCtx->recoveryUnit()->writing(&getBucket(opCtx, rightChildLoc)->parent) = bucketLoc;
}
}
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::split(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::split(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int keypos,
@@ -1770,8 +1770,8 @@ void BtreeLogic<BtreeLayout>::split(OperationContext* txn,
const DiskLoc lchild,
const DiskLoc rchild) {
int split = splitPos(bucket, keypos);
- DiskLoc rLoc = _addBucket(txn);
- BucketType* r = btreemod(txn, getBucket(txn, rLoc));
+ DiskLoc rLoc = _addBucket(opCtx);
+ BucketType* r = btreemod(opCtx, getBucket(opCtx, rLoc));
for (int i = split + 1; i < bucket->n; i++) {
FullKey kn = getFullKey(bucket, i);
@@ -1781,7 +1781,7 @@ void BtreeLogic<BtreeLayout>::split(OperationContext* txn,
assertValid(_indexName, r, _ordering);
r = NULL;
- fixParentPtrs(txn, getBucket(txn, rLoc), rLoc);
+ fixParentPtrs(opCtx, getBucket(opCtx, rLoc), rLoc);
FullKey splitkey = getFullKey(bucket, split);
// splitkey key gets promoted, its children will be thisLoc (l) and rLoc (r)
@@ -1792,20 +1792,20 @@ void BtreeLogic<BtreeLayout>::split(OperationContext* txn,
if (bucket->parent.isNull()) {
// promote splitkey to a parent this->node make a new parent if we were the root
- DiskLoc L = _addBucket(txn);
- BucketType* p = btreemod(txn, getBucket(txn, L));
+ DiskLoc L = _addBucket(opCtx);
+ BucketType* p = btreemod(opCtx, getBucket(opCtx, L));
invariant(pushBack(p, splitkey.recordLoc, splitkey.data, bucketLoc));
p->nextChild = rLoc;
assertValid(_indexName, p, _ordering);
bucket->parent = L;
- _headManager->setHead(txn, L.toRecordId());
- *txn->recoveryUnit()->writing(&getBucket(txn, rLoc)->parent) = bucket->parent;
+ _headManager->setHead(opCtx, L.toRecordId());
+ *opCtx->recoveryUnit()->writing(&getBucket(opCtx, rLoc)->parent) = bucket->parent;
} else {
// set this before calling _insert - if it splits it will do fixParent() logic and
// change the value.
- *txn->recoveryUnit()->writing(&getBucket(txn, rLoc)->parent) = bucket->parent;
- _insert(txn,
- getBucket(txn, bucket->parent),
+ *opCtx->recoveryUnit()->writing(&getBucket(opCtx, rLoc)->parent) = bucket->parent;
+ _insert(opCtx,
+ getBucket(opCtx, bucket->parent),
bucket->parent,
splitkey.data,
splitkey.recordLoc,
@@ -1820,11 +1820,11 @@ void BtreeLogic<BtreeLayout>::split(OperationContext* txn,
// add our this->new key, there is room this->now
if (keypos <= split) {
- insertHere(txn, bucketLoc, newpos, key, recordLoc, lchild, rchild);
+ insertHere(opCtx, bucketLoc, newpos, key, recordLoc, lchild, rchild);
} else {
int kp = keypos - split - 1;
invariant(kp >= 0);
- insertHere(txn, rLoc, kp, key, recordLoc, lchild, rchild);
+ insertHere(opCtx, rLoc, kp, key, recordLoc, lchild, rchild);
}
}
@@ -1842,24 +1842,24 @@ private:
};
template <class BtreeLayout>
-Status BtreeLogic<BtreeLayout>::initAsEmpty(OperationContext* txn) {
- if (!_headManager->getHead(txn).isNull()) {
+Status BtreeLogic<BtreeLayout>::initAsEmpty(OperationContext* opCtx) {
+ if (!_headManager->getHead(opCtx).isNull()) {
return Status(ErrorCodes::InternalError, "index already initialized");
}
- _headManager->setHead(txn, _addBucket(txn).toRecordId());
+ _headManager->setHead(opCtx, _addBucket(opCtx).toRecordId());
return Status::OK();
}
template <class BtreeLayout>
-DiskLoc BtreeLogic<BtreeLayout>::_addBucket(OperationContext* txn) {
+DiskLoc BtreeLogic<BtreeLayout>::_addBucket(OperationContext* opCtx) {
DummyDocWriter docWriter(BtreeLayout::BucketSize);
- StatusWith<RecordId> loc = _recordStore->insertRecordWithDocWriter(txn, &docWriter);
+ StatusWith<RecordId> loc = _recordStore->insertRecordWithDocWriter(opCtx, &docWriter);
// XXX: remove this(?) or turn into massert or sanely bubble it back up.
uassertStatusOK(loc.getStatus());
// this is a new bucket, not referenced by anyone, probably don't need this lock
- BucketType* b = btreemod(txn, getBucket(txn, loc.getValue()));
+ BucketType* b = btreemod(opCtx, getBucket(opCtx, loc.getValue()));
init(b);
return DiskLoc::fromRecordId(loc.getValue());
}
@@ -1888,20 +1888,20 @@ void BtreeLogic<BtreeLayout>::dumpBucket(const BucketType* bucket, int indentLen
}
template <class BtreeLayout>
-DiskLoc BtreeLogic<BtreeLayout>::getDiskLoc(OperationContext* txn,
+DiskLoc BtreeLogic<BtreeLayout>::getDiskLoc(OperationContext* opCtx,
const DiskLoc& bucketLoc,
const int keyOffset) const {
invariant(!bucketLoc.isNull());
- BucketType* bucket = getBucket(txn, bucketLoc);
+ BucketType* bucket = getBucket(opCtx, bucketLoc);
return getKeyHeader(bucket, keyOffset).recordLoc;
}
template <class BtreeLayout>
-BSONObj BtreeLogic<BtreeLayout>::getKey(OperationContext* txn,
+BSONObj BtreeLogic<BtreeLayout>::getKey(OperationContext* opCtx,
const DiskLoc& bucketLoc,
const int keyOffset) const {
invariant(!bucketLoc.isNull());
- BucketType* bucket = getBucket(txn, bucketLoc);
+ BucketType* bucket = getBucket(opCtx, bucketLoc);
int n = bucket->n;
invariant(n != BtreeLayout::INVALID_N_SENTINEL);
invariant(n >= 0);
@@ -1920,7 +1920,7 @@ BSONObj BtreeLogic<BtreeLayout>::getKey(OperationContext* txn,
}
template <class BtreeLayout>
-IndexKeyEntry BtreeLogic<BtreeLayout>::getRandomEntry(OperationContext* txn) const {
+IndexKeyEntry BtreeLogic<BtreeLayout>::getRandomEntry(OperationContext* opCtx) const {
// To ensure a uniform distribution, all keys must have an equal probability of being selected.
// Specifically, a key from the root should have the same probability of being selected as a key
// from a leaf.
@@ -1934,19 +1934,19 @@ IndexKeyEntry BtreeLogic<BtreeLayout>::getRandomEntry(OperationContext* txn) con
// As a simplification, we treat all buckets in a given level as having the same number of
// children. While this is inaccurate if the tree isn't perfectly balanced or if key-size
// greatly varies, it is assumed to be good enough for this purpose.
- invariant(!isEmpty(txn));
- BucketType* root = getRoot(txn);
+ invariant(!isEmpty(opCtx));
+ BucketType* root = getRoot(opCtx);
vector<int64_t> nKeysInLevel;
vector<FullKey> selectedKeys;
- auto& prng = txn->getClient()->getPrng();
+ auto& prng = opCtx->getClient()->getPrng();
int nRetries = 0;
const int kMaxRetries = 5;
do {
// See documentation below for description of parameters.
- recordRandomWalk(txn, &prng, root, 1, &nKeysInLevel, &selectedKeys);
+ recordRandomWalk(opCtx, &prng, root, 1, &nKeysInLevel, &selectedKeys);
} while (selectedKeys.empty() && nRetries++ < kMaxRetries);
massert(28826,
str::stream() << "index " << _indexName << " may be corrupt, please repair",
@@ -1989,7 +1989,7 @@ IndexKeyEntry BtreeLogic<BtreeLayout>::getRandomEntry(OperationContext* txn) con
* from the bucket we went through on the ith level of the B-tree.
*/
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::recordRandomWalk(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::recordRandomWalk(OperationContext* opCtx,
PseudoRandom* prng,
BucketType* curBucket,
int64_t nBucketsInCurrentLevel,
@@ -2008,34 +2008,34 @@ void BtreeLogic<BtreeLayout>::recordRandomWalk(OperationContext* txn,
// Select a random child and descend (if there are any).
int nChildren = nKeys + 1;
int nextChild = prng->nextInt32(nChildren);
- if (auto child = childForPos(txn, curBucket, nextChild)) {
+ if (auto child = childForPos(opCtx, curBucket, nextChild)) {
recordRandomWalk(
- txn, prng, child, nBucketsInCurrentLevel * nChildren, nKeysInLevel, selectedKeys);
+ opCtx, prng, child, nBucketsInCurrentLevel * nChildren, nKeysInLevel, selectedKeys);
}
}
template <class BtreeLayout>
-Status BtreeLogic<BtreeLayout>::touch(OperationContext* txn) const {
- return _recordStore->touch(txn, NULL);
+Status BtreeLogic<BtreeLayout>::touch(OperationContext* opCtx) const {
+ return _recordStore->touch(opCtx, NULL);
}
template <class BtreeLayout>
-long long BtreeLogic<BtreeLayout>::fullValidate(OperationContext* txn,
+long long BtreeLogic<BtreeLayout>::fullValidate(OperationContext* opCtx,
long long* unusedCount,
bool strict,
bool dumpBuckets,
unsigned depth) const {
- return _fullValidate(txn, getRootLoc(txn), unusedCount, strict, dumpBuckets, depth);
+ return _fullValidate(opCtx, getRootLoc(opCtx), unusedCount, strict, dumpBuckets, depth);
}
template <class BtreeLayout>
-long long BtreeLogic<BtreeLayout>::_fullValidate(OperationContext* txn,
+long long BtreeLogic<BtreeLayout>::_fullValidate(OperationContext* opCtx,
const DiskLoc bucketLoc,
long long* unusedCount,
bool strict,
bool dumpBuckets,
unsigned depth) const {
- BucketType* bucket = getBucket(txn, bucketLoc);
+ BucketType* bucket = getBucket(opCtx, bucketLoc);
assertValid(_indexName, bucket, _ordering, true);
if (dumpBuckets) {
@@ -2056,7 +2056,7 @@ long long BtreeLogic<BtreeLayout>::_fullValidate(OperationContext* txn,
if (!kn.prevChildBucket.isNull()) {
DiskLoc left = kn.prevChildBucket;
- BucketType* b = getBucket(txn, left);
+ BucketType* b = getBucket(opCtx, left);
if (strict) {
invariant(b->parent == bucketLoc);
@@ -2064,12 +2064,12 @@ long long BtreeLogic<BtreeLayout>::_fullValidate(OperationContext* txn,
wassert(b->parent == bucketLoc);
}
- keyCount += _fullValidate(txn, left, unusedCount, strict, dumpBuckets, depth + 1);
+ keyCount += _fullValidate(opCtx, left, unusedCount, strict, dumpBuckets, depth + 1);
}
}
if (!bucket->nextChild.isNull()) {
- BucketType* b = getBucket(txn, bucket->nextChild);
+ BucketType* b = getBucket(opCtx, bucket->nextChild);
if (strict) {
invariant(b->parent == bucketLoc);
} else {
@@ -2077,7 +2077,7 @@ long long BtreeLogic<BtreeLayout>::_fullValidate(OperationContext* txn,
}
keyCount +=
- _fullValidate(txn, bucket->nextChild, unusedCount, strict, dumpBuckets, depth + 1);
+ _fullValidate(opCtx, bucket->nextChild, unusedCount, strict, dumpBuckets, depth + 1);
}
return keyCount;
@@ -2149,7 +2149,7 @@ void BtreeLogic<BtreeLayout>::assertValid(const std::string& ns,
}
template <class BtreeLayout>
-Status BtreeLogic<BtreeLayout>::insert(OperationContext* txn,
+Status BtreeLogic<BtreeLayout>::insert(OperationContext* opCtx,
const BSONObj& rawKey,
const DiskLoc& value,
bool dupsAllowed) {
@@ -2161,15 +2161,15 @@ Status BtreeLogic<BtreeLayout>::insert(OperationContext* txn,
return Status(ErrorCodes::KeyTooLong, msg);
}
- Status status =
- _insert(txn, getRoot(txn), getRootLoc(txn), key, value, dupsAllowed, DiskLoc(), DiskLoc());
+ Status status = _insert(
+ opCtx, getRoot(opCtx), getRootLoc(opCtx), key, value, dupsAllowed, DiskLoc(), DiskLoc());
- assertValid(_indexName, getRoot(txn), _ordering);
+ assertValid(_indexName, getRoot(opCtx), _ordering);
return status;
}
template <class BtreeLayout>
-Status BtreeLogic<BtreeLayout>::_insert(OperationContext* txn,
+Status BtreeLogic<BtreeLayout>::_insert(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
const KeyDataType& key,
@@ -2181,7 +2181,7 @@ Status BtreeLogic<BtreeLayout>::_insert(OperationContext* txn,
int pos;
bool found;
- Status findStatus = _find(txn, bucket, key, recordLoc, !dupsAllowed, &pos, &found);
+ Status findStatus = _find(opCtx, bucket, key, recordLoc, !dupsAllowed, &pos, &found);
if (!findStatus.isOK()) {
return findStatus;
}
@@ -2192,7 +2192,7 @@ Status BtreeLogic<BtreeLayout>::_insert(OperationContext* txn,
LOG(4) << "btree _insert: reusing unused key" << endl;
massert(17433, "_insert: reuse key but lchild is not null", leftChild.isNull());
massert(17434, "_insert: reuse key but rchild is not null", rightChild.isNull());
- txn->recoveryUnit()->writing(&header)->setUsed();
+ opCtx->recoveryUnit()->writing(&header)->setUsed();
return Status::OK();
}
// The logic in _find() prohibits finding and returning a position if the 'used' bit
@@ -2210,11 +2210,11 @@ Status BtreeLogic<BtreeLayout>::_insert(OperationContext* txn,
// promoting a split key. These are the only two cases where _insert() is called
// currently.
if (childLoc.isNull() || !rightChild.isNull()) {
- insertHere(txn, bucketLoc, pos, key, recordLoc, leftChild, rightChild);
+ insertHere(opCtx, bucketLoc, pos, key, recordLoc, leftChild, rightChild);
return Status::OK();
} else {
- return _insert(txn,
- getBucket(txn, childLoc),
+ return _insert(opCtx,
+ getBucket(opCtx, childLoc),
childLoc,
key,
recordLoc,
@@ -2225,11 +2225,11 @@ Status BtreeLogic<BtreeLayout>::_insert(OperationContext* txn,
}
template <class BtreeLayout>
-DiskLoc BtreeLogic<BtreeLayout>::advance(OperationContext* txn,
+DiskLoc BtreeLogic<BtreeLayout>::advance(OperationContext* opCtx,
const DiskLoc& bucketLoc,
int* posInOut,
int direction) const {
- BucketType* bucket = getBucket(txn, bucketLoc);
+ BucketType* bucket = getBucket(opCtx, bucketLoc);
if (*posInOut < 0 || *posInOut >= bucket->n) {
log() << "ASSERT failure advancing btree bucket" << endl;
@@ -2246,7 +2246,7 @@ DiskLoc BtreeLogic<BtreeLayout>::advance(OperationContext* txn,
// Look down if we need to.
DiskLoc nextDownLoc = childLocForPos(bucket, ko + adj);
- BucketType* nextDown = getBucket(txn, nextDownLoc);
+ BucketType* nextDown = getBucket(opCtx, nextDownLoc);
if (NULL != nextDown) {
for (;;) {
if (direction > 0) {
@@ -2255,7 +2255,7 @@ DiskLoc BtreeLogic<BtreeLayout>::advance(OperationContext* txn,
*posInOut = nextDown->n - 1;
}
DiskLoc newNextDownLoc = childLocForPos(nextDown, *posInOut + adj);
- BucketType* newNextDownBucket = getBucket(txn, newNextDownLoc);
+ BucketType* newNextDownBucket = getBucket(opCtx, newNextDownLoc);
if (NULL == newNextDownBucket) {
break;
}
@@ -2273,12 +2273,12 @@ DiskLoc BtreeLogic<BtreeLayout>::advance(OperationContext* txn,
// Hit the end of the bucket, move up and over.
DiskLoc childLoc = bucketLoc;
- DiskLoc ancestor = getBucket(txn, bucketLoc)->parent;
+ DiskLoc ancestor = getBucket(opCtx, bucketLoc)->parent;
for (;;) {
if (ancestor.isNull()) {
break;
}
- BucketType* an = getBucket(txn, ancestor);
+ BucketType* an = getBucket(opCtx, ancestor);
for (int i = 0; i < an->n; i++) {
if (childLocForPos(an, i + adj) == childLoc) {
*posInOut = i;
@@ -2295,14 +2295,14 @@ DiskLoc BtreeLogic<BtreeLayout>::advance(OperationContext* txn,
}
template <class BtreeLayout>
-bool BtreeLogic<BtreeLayout>::keyIsUsed(OperationContext* txn,
+bool BtreeLogic<BtreeLayout>::keyIsUsed(OperationContext* opCtx,
const DiskLoc& loc,
const int& pos) const {
- return getKeyHeader(getBucket(txn, loc), pos).isUsed();
+ return getKeyHeader(getBucket(opCtx, loc), pos).isUsed();
}
template <class BtreeLayout>
-bool BtreeLogic<BtreeLayout>::locate(OperationContext* txn,
+bool BtreeLogic<BtreeLayout>::locate(OperationContext* opCtx,
const BSONObj& key,
const DiskLoc& recordLoc,
const int direction,
@@ -2315,13 +2315,13 @@ bool BtreeLogic<BtreeLayout>::locate(OperationContext* txn,
bool found = false;
KeyDataOwnedType owned(key);
- *bucketLocOut = _locate(txn, getRootLoc(txn), owned, posOut, &found, recordLoc, direction);
+ *bucketLocOut = _locate(opCtx, getRootLoc(opCtx), owned, posOut, &found, recordLoc, direction);
if (!found) {
return false;
}
- skipUnusedKeys(txn, bucketLocOut, posOut, direction);
+ skipUnusedKeys(opCtx, bucketLocOut, posOut, direction);
return found;
}
@@ -2331,7 +2331,7 @@ bool BtreeLogic<BtreeLayout>::locate(OperationContext* txn,
* Caller should have acquired lock on bucketLoc.
*/
template <class BtreeLayout>
-DiskLoc BtreeLogic<BtreeLayout>::_locate(OperationContext* txn,
+DiskLoc BtreeLogic<BtreeLayout>::_locate(OperationContext* opCtx,
const DiskLoc& bucketLoc,
const KeyDataType& key,
int* posOut,
@@ -2339,9 +2339,9 @@ DiskLoc BtreeLogic<BtreeLayout>::_locate(OperationContext* txn,
const DiskLoc& recordLoc,
const int direction) const {
int position;
- BucketType* bucket = getBucket(txn, bucketLoc);
+ BucketType* bucket = getBucket(opCtx, bucketLoc);
// XXX: owned to not owned conversion(?)
- _find(txn, bucket, key, recordLoc, false, &position, foundOut);
+ _find(opCtx, bucket, key, recordLoc, false, &position, foundOut);
// Look in our current bucket.
if (*foundOut) {
@@ -2353,7 +2353,7 @@ DiskLoc BtreeLogic<BtreeLayout>::_locate(OperationContext* txn,
DiskLoc childLoc = childLocForPos(bucket, position);
if (!childLoc.isNull()) {
- DiskLoc inChild = _locate(txn, childLoc, key, posOut, foundOut, recordLoc, direction);
+ DiskLoc inChild = _locate(opCtx, childLoc, key, posOut, foundOut, recordLoc, direction);
if (!inChild.isNull()) {
return inChild;
}
@@ -2389,12 +2389,12 @@ bool BtreeLogic<BtreeLayout>::isHead(BucketType* bucket) {
template <class BtreeLayout>
typename BtreeLogic<BtreeLayout>::BucketType* BtreeLogic<BtreeLayout>::getBucket(
- OperationContext* txn, const RecordId id) const {
+ OperationContext* opCtx, const RecordId id) const {
if (id.isNull()) {
return NULL;
}
- RecordData recordData = _recordStore->dataFor(txn, id);
+ RecordData recordData = _recordStore->dataFor(opCtx, id);
// we need to be working on the raw bytes, not a transient copy
invariant(!recordData.isOwned());
@@ -2404,20 +2404,20 @@ typename BtreeLogic<BtreeLayout>::BucketType* BtreeLogic<BtreeLayout>::getBucket
template <class BtreeLayout>
typename BtreeLogic<BtreeLayout>::BucketType* BtreeLogic<BtreeLayout>::getRoot(
- OperationContext* txn) const {
- return getBucket(txn, _headManager->getHead(txn));
+ OperationContext* opCtx) const {
+ return getBucket(opCtx, _headManager->getHead(opCtx));
}
template <class BtreeLayout>
-DiskLoc BtreeLogic<BtreeLayout>::getRootLoc(OperationContext* txn) const {
- return DiskLoc::fromRecordId(_headManager->getHead(txn));
+DiskLoc BtreeLogic<BtreeLayout>::getRootLoc(OperationContext* opCtx) const {
+ return DiskLoc::fromRecordId(_headManager->getHead(opCtx));
}
template <class BtreeLayout>
typename BtreeLogic<BtreeLayout>::BucketType* BtreeLogic<BtreeLayout>::childForPos(
- OperationContext* txn, BucketType* bucket, int pos) const {
+ OperationContext* opCtx, BucketType* bucket, int pos) const {
DiskLoc loc = childLocForPos(bucket, pos);
- return getBucket(txn, loc);
+ return getBucket(opCtx, loc);
}
template <class BtreeLayout>
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic.h b/src/mongo/db/storage/mmap_v1/btree/btree_logic.h
index 438cbc54f88..1f6f0645875 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic.h
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic.h
@@ -108,7 +108,7 @@ public:
class SetRightLeafLocChange;
- Builder(BtreeLogic* logic, OperationContext* txn, bool dupsAllowed);
+ Builder(BtreeLogic* logic, OperationContext* opCtx, bool dupsAllowed);
/**
* Creates and returns a new empty bucket to the right of leftSib, maintaining the
@@ -128,18 +128,18 @@ public:
std::unique_ptr<KeyDataOwnedType> _keyLast;
// Not owned.
- OperationContext* _txn;
+ OperationContext* _opCtx;
};
/**
* Caller owns the returned pointer.
* 'this' must outlive the returned pointer.
*/
- Builder* newBuilder(OperationContext* txn, bool dupsAllowed);
+ Builder* newBuilder(OperationContext* opCtx, bool dupsAllowed);
- Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const DiskLoc& loc) const;
+ Status dupKeyCheck(OperationContext* opCtx, const BSONObj& key, const DiskLoc& loc) const;
- Status insert(OperationContext* txn,
+ Status insert(OperationContext* opCtx,
const BSONObj& rawKey,
const DiskLoc& value,
bool dupsAllowed);
@@ -152,23 +152,23 @@ public:
* bucketLocOut would contain the bucket containing key which is before or after the
* searched one (dependent on the direction).
*/
- bool locate(OperationContext* txn,
+ bool locate(OperationContext* opCtx,
const BSONObj& key,
const DiskLoc& recordLoc,
const int direction,
int* posOut,
DiskLoc* bucketLocOut) const;
- void advance(OperationContext* txn,
+ void advance(OperationContext* opCtx,
DiskLoc* bucketLocInOut,
int* posInOut,
int direction) const;
- bool exists(OperationContext* txn, const KeyDataType& key) const;
+ bool exists(OperationContext* opCtx, const KeyDataType& key) const;
- bool unindex(OperationContext* txn, const BSONObj& key, const DiskLoc& recordLoc);
+ bool unindex(OperationContext* opCtx, const BSONObj& key, const DiskLoc& recordLoc);
- bool isEmpty(OperationContext* txn) const;
+ bool isEmpty(OperationContext* opCtx) const;
long long fullValidate(OperationContext*,
long long* unusedCount,
@@ -176,27 +176,29 @@ public:
bool dumpBuckets,
unsigned depth) const;
- DiskLoc getDiskLoc(OperationContext* txn, const DiskLoc& bucketLoc, const int keyOffset) const;
+ DiskLoc getDiskLoc(OperationContext* opCtx,
+ const DiskLoc& bucketLoc,
+ const int keyOffset) const;
- BSONObj getKey(OperationContext* txn, const DiskLoc& bucketLoc, const int keyOffset) const;
+ BSONObj getKey(OperationContext* opCtx, const DiskLoc& bucketLoc, const int keyOffset) const;
/**
* Returns a pseudo-random element from the tree. It is an error to call this method if the tree
* is empty.
*/
- IndexKeyEntry getRandomEntry(OperationContext* txn) const;
+ IndexKeyEntry getRandomEntry(OperationContext* opCtx) const;
- DiskLoc getHead(OperationContext* txn) const {
- return DiskLoc::fromRecordId(_headManager->getHead(txn));
+ DiskLoc getHead(OperationContext* opCtx) const {
+ return DiskLoc::fromRecordId(_headManager->getHead(opCtx));
}
- Status touch(OperationContext* txn) const;
+ Status touch(OperationContext* opCtx) const;
//
// Composite key navigation methods
//
- void customLocate(OperationContext* txn,
+ void customLocate(OperationContext* opCtx,
DiskLoc* locInOut,
int* keyOfsInOut,
const IndexSeekPoint& seekPoint,
@@ -208,7 +210,7 @@ public:
const IndexSeekPoint& seekPoint,
int direction) const;
- void restorePosition(OperationContext* txn,
+ void restorePosition(OperationContext* opCtx,
const BSONObj& savedKey,
const DiskLoc& savedLoc,
int direction,
@@ -222,7 +224,7 @@ public:
/**
* Returns OK if the index was uninitialized before, error status otherwise.
*/
- Status initAsEmpty(OperationContext* txn);
+ Status initAsEmpty(OperationContext* opCtx);
//
// Size constants
@@ -319,7 +321,7 @@ private:
static void setNotPacked(BucketType* bucket);
- static BucketType* btreemod(OperationContext* txn, BucketType* bucket);
+ static BucketType* btreemod(OperationContext* opCtx, BucketType* bucket);
static int splitPos(BucketType* bucket, int keypos);
@@ -345,7 +347,7 @@ private:
// information).
//
- bool basicInsert(OperationContext* txn,
+ bool basicInsert(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int& keypos,
@@ -354,16 +356,16 @@ private:
void dropFront(BucketType* bucket, int nDrop, int& refpos);
- void _pack(OperationContext* txn, BucketType* bucket, const DiskLoc thisLoc, int& refPos);
+ void _pack(OperationContext* opCtx, BucketType* bucket, const DiskLoc thisLoc, int& refPos);
- void customLocate(OperationContext* txn,
+ void customLocate(OperationContext* opCtx,
DiskLoc* locInOut,
int* keyOfsInOut,
const IndexSeekPoint& seekPoint,
int direction,
std::pair<DiskLoc, int>& bestParent) const;
- Status _find(OperationContext* txn,
+ Status _find(OperationContext* opCtx,
BucketType* bucket,
const KeyDataType& key,
const DiskLoc& recordLoc,
@@ -371,7 +373,7 @@ private:
int* keyPositionOut,
bool* foundOut) const;
- bool customFind(OperationContext* txn,
+ bool customFind(OperationContext* opCtx,
int low,
int high,
const IndexSeekPoint& seekPoint,
@@ -380,24 +382,24 @@ private:
int* keyOfsInOut,
std::pair<DiskLoc, int>& bestParent) const;
- void advanceToImpl(OperationContext* txn,
+ void advanceToImpl(OperationContext* opCtx,
DiskLoc* thisLocInOut,
int* keyOfsInOut,
const IndexSeekPoint& seekPoint,
int direction) const;
- bool wouldCreateDup(OperationContext* txn, const KeyDataType& key, const DiskLoc self) const;
+ bool wouldCreateDup(OperationContext* opCtx, const KeyDataType& key, const DiskLoc self) const;
- bool keyIsUsed(OperationContext* txn, const DiskLoc& loc, const int& pos) const;
+ bool keyIsUsed(OperationContext* opCtx, const DiskLoc& loc, const int& pos) const;
- void skipUnusedKeys(OperationContext* txn, DiskLoc* loc, int* pos, int direction) const;
+ void skipUnusedKeys(OperationContext* opCtx, DiskLoc* loc, int* pos, int direction) const;
- DiskLoc advance(OperationContext* txn,
+ DiskLoc advance(OperationContext* opCtx,
const DiskLoc& bucketLoc,
int* posInOut,
int direction) const;
- DiskLoc _locate(OperationContext* txn,
+ DiskLoc _locate(OperationContext* opCtx,
const DiskLoc& bucketLoc,
const KeyDataType& key,
int* posOut,
@@ -405,28 +407,28 @@ private:
const DiskLoc& recordLoc,
const int direction) const;
- long long _fullValidate(OperationContext* txn,
+ long long _fullValidate(OperationContext* opCtx,
const DiskLoc bucketLoc,
long long* unusedCount,
bool strict,
bool dumpBuckets,
unsigned depth) const;
- DiskLoc _addBucket(OperationContext* txn);
+ DiskLoc _addBucket(OperationContext* opCtx);
- bool canMergeChildren(OperationContext* txn,
+ bool canMergeChildren(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
const int leftIndex);
// has to look in children of 'bucket' and requires record store
- int _rebalancedSeparatorPos(OperationContext* txn, BucketType* bucket, int leftIndex);
+ int _rebalancedSeparatorPos(OperationContext* opCtx, BucketType* bucket, int leftIndex);
void _packReadyForMod(BucketType* bucket, int& refPos);
void truncateTo(BucketType* bucket, int N, int& refPos);
- void split(OperationContext* txn,
+ void split(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int keypos,
@@ -435,7 +437,7 @@ private:
const DiskLoc lchild,
const DiskLoc rchild);
- Status _insert(OperationContext* txn,
+ Status _insert(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
const KeyDataType& key,
@@ -445,7 +447,7 @@ private:
const DiskLoc rightChild);
// TODO take a BucketType*?
- void insertHere(OperationContext* txn,
+ void insertHere(OperationContext* opCtx,
const DiskLoc bucketLoc,
int pos,
const KeyDataType& key,
@@ -455,7 +457,7 @@ private:
std::string dupKeyError(const KeyDataType& key) const;
- void setInternalKey(OperationContext* txn,
+ void setInternalKey(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int keypos,
@@ -470,16 +472,16 @@ private:
int firstIndex = 0,
int lastIndex = -1);
- bool mayBalanceWithNeighbors(OperationContext* txn,
+ bool mayBalanceWithNeighbors(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc);
- void doBalanceChildren(OperationContext* txn,
+ void doBalanceChildren(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int leftIndex);
- void doBalanceLeftToRight(OperationContext* txn,
+ void doBalanceLeftToRight(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc thisLoc,
int leftIndex,
@@ -489,7 +491,7 @@ private:
BucketType* r,
const DiskLoc rchild);
- void doBalanceRightToLeft(OperationContext* txn,
+ void doBalanceRightToLeft(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc thisLoc,
int leftIndex,
@@ -499,30 +501,30 @@ private:
BucketType* r,
const DiskLoc rchild);
- bool tryBalanceChildren(OperationContext* txn,
+ bool tryBalanceChildren(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int leftIndex);
- int indexInParent(OperationContext* txn, BucketType* bucket, const DiskLoc bucketLoc) const;
+ int indexInParent(OperationContext* opCtx, BucketType* bucket, const DiskLoc bucketLoc) const;
- void doMergeChildren(OperationContext* txn,
+ void doMergeChildren(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int leftIndex);
- void replaceWithNextChild(OperationContext* txn, BucketType* bucket, const DiskLoc bucketLoc);
+ void replaceWithNextChild(OperationContext* opCtx, BucketType* bucket, const DiskLoc bucketLoc);
- void deleteInternalKey(OperationContext* txn,
+ void deleteInternalKey(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int keypos);
- void delKeyAtPos(OperationContext* txn, BucketType* bucket, const DiskLoc bucketLoc, int p);
+ void delKeyAtPos(OperationContext* opCtx, BucketType* bucket, const DiskLoc bucketLoc, int p);
- void delBucket(OperationContext* txn, BucketType* bucket, const DiskLoc bucketLoc);
+ void delBucket(OperationContext* opCtx, BucketType* bucket, const DiskLoc bucketLoc);
- void deallocBucket(OperationContext* txn, BucketType* bucket, const DiskLoc bucketLoc);
+ void deallocBucket(OperationContext* opCtx, BucketType* bucket, const DiskLoc bucketLoc);
bool _keyIsAt(const BSONObj& savedKey,
const DiskLoc& savedLoc,
@@ -543,18 +545,18 @@ private:
const DiskLoc prevChild);
- BucketType* childForPos(OperationContext* txn, BucketType* bucket, int pos) const;
+ BucketType* childForPos(OperationContext* opCtx, BucketType* bucket, int pos) const;
- BucketType* getBucket(OperationContext* txn, const DiskLoc dl) const {
- return getBucket(txn, dl.toRecordId());
+ BucketType* getBucket(OperationContext* opCtx, const DiskLoc dl) const {
+ return getBucket(opCtx, dl.toRecordId());
}
- BucketType* getBucket(OperationContext* txn, const RecordId dl) const;
+ BucketType* getBucket(OperationContext* opCtx, const RecordId dl) const;
- BucketType* getRoot(OperationContext* txn) const;
+ BucketType* getRoot(OperationContext* opCtx) const;
- DiskLoc getRootLoc(OperationContext* txn) const;
+ DiskLoc getRootLoc(OperationContext* opCtx) const;
- void recordRandomWalk(OperationContext* txn,
+ void recordRandomWalk(OperationContext* opCtx,
PseudoRandom* prng,
BucketType* curBucket,
int64_t nBucketsInCurrentLevel,
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
index 5d24c5b5713..e34a5c5a22e 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
@@ -63,18 +63,18 @@ public:
protected:
void checkValidNumKeys(int nKeys) {
- OperationContextNoop txn;
- ASSERT_EQUALS(nKeys, _helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ OperationContextNoop opCtx;
+ ASSERT_EQUALS(nKeys, _helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
}
Status insert(const BSONObj& key, const DiskLoc dl, bool dupsAllowed = true) {
- OperationContextNoop txn;
- return _helper.btree.insert(&txn, key, dl, dupsAllowed);
+ OperationContextNoop opCtx;
+ return _helper.btree.insert(&opCtx, key, dl, dupsAllowed);
}
bool unindex(const BSONObj& key) {
- OperationContextNoop txn;
- return _helper.btree.unindex(&txn, key, _helper.dummyDiskLoc);
+ OperationContextNoop opCtx;
+ return _helper.btree.unindex(&opCtx, key, _helper.dummyDiskLoc);
}
void locate(const BSONObj& key,
@@ -92,9 +92,10 @@ protected:
int direction) {
int pos;
DiskLoc loc;
- OperationContextNoop txn;
- ASSERT_EQUALS(expectedFound,
- _helper.btree.locate(&txn, key, _helper.dummyDiskLoc, direction, &pos, &loc));
+ OperationContextNoop opCtx;
+ ASSERT_EQUALS(
+ expectedFound,
+ _helper.btree.locate(&opCtx, key, _helper.dummyDiskLoc, direction, &pos, &loc));
ASSERT_EQUALS(expectedLocation, loc);
ASSERT_EQUALS(expectedPos, pos);
}
@@ -116,8 +117,8 @@ protected:
}
BucketType* head() const {
- OperationContextNoop txn;
- return _helper.btree.getBucket(&txn, _helper.headManager.getHead(&txn));
+ OperationContextNoop opCtx;
+ return _helper.btree.getBucket(&opCtx, _helper.headManager.getHead(&opCtx));
}
void forcePackBucket(const RecordId bucketLoc) {
@@ -138,8 +139,8 @@ protected:
int bucketRebalancedSeparatorPos(const RecordId bucketLoc, int leftIndex) {
BucketType* bucket = _helper.btree.getBucket(NULL, bucketLoc);
- OperationContextNoop txn;
- return _helper.btree._rebalancedSeparatorPos(&txn, bucket, leftIndex);
+ OperationContextNoop opCtx;
+ return _helper.btree._rebalancedSeparatorPos(&opCtx, bucket, leftIndex);
}
FullKey getKey(const RecordId bucketLoc, int pos) const {
@@ -155,20 +156,20 @@ protected:
}
DiskLoc newBucket() {
- OperationContextNoop txn;
- return _helper.btree._addBucket(&txn);
+ OperationContextNoop opCtx;
+ return _helper.btree._addBucket(&opCtx);
}
/**
* Sets the nextChild pointer for the bucket at the specified location.
*/
void setBucketNextChild(const DiskLoc bucketLoc, const DiskLoc nextChild) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
- BucketType* bucket = _helper.btree.getBucket(&txn, bucketLoc);
+ BucketType* bucket = _helper.btree.getBucket(&opCtx, bucketLoc);
bucket->nextChild = nextChild;
- _helper.btree.fixParentPtrs(&txn, bucket, bucketLoc);
+ _helper.btree.fixParentPtrs(&opCtx, bucket, bucketLoc);
}
protected:
@@ -183,8 +184,8 @@ template <class OnDiskFormat>
class SimpleCreate : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
this->checkValidNumKeys(0);
}
@@ -194,14 +195,14 @@ template <class OnDiskFormat>
class SimpleInsertDelete : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
BSONObj key = simpleKey('z');
this->insert(key, this->_helper.dummyDiskLoc);
this->checkValidNumKeys(1);
- this->locate(key, 0, true, this->_helper.headManager.getHead(&txn), 1);
+ this->locate(key, 0, true, this->_helper.headManager.getHead(&opCtx), 1);
this->unindex(key);
@@ -214,8 +215,8 @@ template <class OnDiskFormat>
class SplitUnevenBucketBase : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
for (int i = 0; i < 10; ++i) {
BSONObj shortKey = simpleKey(shortToken(i), 1);
@@ -278,17 +279,17 @@ template <class OnDiskFormat>
class MissingLocate : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
for (int i = 0; i < 3; ++i) {
BSONObj k = simpleKey('b' + 2 * i);
this->insert(k, this->_helper.dummyDiskLoc);
}
- locateExtended(1, 'a', 'b', this->_helper.headManager.getHead(&txn));
- locateExtended(1, 'c', 'd', this->_helper.headManager.getHead(&txn));
- locateExtended(1, 'e', 'f', this->_helper.headManager.getHead(&txn));
+ locateExtended(1, 'a', 'b', this->_helper.headManager.getHead(&opCtx));
+ locateExtended(1, 'c', 'd', this->_helper.headManager.getHead(&opCtx));
+ locateExtended(1, 'e', 'f', this->_helper.headManager.getHead(&opCtx));
locateExtended(1, 'g', 'g' + 1, RecordId()); // of course, 'h' isn't in the index.
// old behavior
@@ -298,9 +299,9 @@ public:
// locateExtended( -1, 'g', 'f', dl() );
locateExtended(-1, 'a', 'a' - 1, RecordId()); // of course, 'a' - 1 isn't in the index
- locateExtended(-1, 'c', 'b', this->_helper.headManager.getHead(&txn));
- locateExtended(-1, 'e', 'd', this->_helper.headManager.getHead(&txn));
- locateExtended(-1, 'g', 'f', this->_helper.headManager.getHead(&txn));
+ locateExtended(-1, 'c', 'b', this->_helper.headManager.getHead(&opCtx));
+ locateExtended(-1, 'e', 'd', this->_helper.headManager.getHead(&opCtx));
+ locateExtended(-1, 'g', 'f', this->_helper.headManager.getHead(&opCtx));
}
private:
@@ -316,8 +317,8 @@ template <class OnDiskFormat>
class MissingLocateMultiBucket : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
this->insert(simpleKey('A', 800), this->_helper.dummyDiskLoc);
this->insert(simpleKey('B', 800), this->_helper.dummyDiskLoc);
@@ -337,23 +338,23 @@ public:
// 'E' is the split point and should be in the head the rest should be ~50/50
const BSONObj splitPoint = simpleKey('E', 800);
- this->_helper.btree.locate(&txn, splitPoint, this->_helper.dummyDiskLoc, 1, &pos, &loc);
- ASSERT_EQUALS(this->_helper.headManager.getHead(&txn), loc.toRecordId());
+ this->_helper.btree.locate(&opCtx, splitPoint, this->_helper.dummyDiskLoc, 1, &pos, &loc);
+ ASSERT_EQUALS(this->_helper.headManager.getHead(&opCtx), loc.toRecordId());
ASSERT_EQUALS(0, pos);
// Find the one before 'E'
int largePos;
DiskLoc largeLoc;
this->_helper.btree.locate(
- &txn, splitPoint, this->_helper.dummyDiskLoc, 1, &largePos, &largeLoc);
- this->_helper.btree.advance(&txn, &largeLoc, &largePos, -1);
+ &opCtx, splitPoint, this->_helper.dummyDiskLoc, 1, &largePos, &largeLoc);
+ this->_helper.btree.advance(&opCtx, &largeLoc, &largePos, -1);
// Find the one after 'E'
int smallPos;
DiskLoc smallLoc;
this->_helper.btree.locate(
- &txn, splitPoint, this->_helper.dummyDiskLoc, 1, &smallPos, &smallLoc);
- this->_helper.btree.advance(&txn, &smallLoc, &smallPos, 1);
+ &opCtx, splitPoint, this->_helper.dummyDiskLoc, 1, &smallPos, &smallLoc);
+ this->_helper.btree.advance(&opCtx, &smallLoc, &smallPos, 1);
ASSERT_NOT_EQUALS(smallLoc, largeLoc);
ASSERT_NOT_EQUALS(smallLoc, loc);
@@ -368,8 +369,8 @@ template <class OnDiskFormat>
class SERVER983 : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
this->insert(simpleKey('A', 800), this->_helper.dummyDiskLoc);
this->insert(simpleKey('B', 800), this->_helper.dummyDiskLoc);
@@ -389,23 +390,23 @@ public:
// 'H' is the maximum 'large' interval key, 90% should be < 'H' and 10% larger
const BSONObj splitPoint = simpleKey('H', 800);
- this->_helper.btree.locate(&txn, splitPoint, this->_helper.dummyDiskLoc, 1, &pos, &loc);
- ASSERT_EQUALS(this->_helper.headManager.getHead(&txn), loc.toRecordId());
+ this->_helper.btree.locate(&opCtx, splitPoint, this->_helper.dummyDiskLoc, 1, &pos, &loc);
+ ASSERT_EQUALS(this->_helper.headManager.getHead(&opCtx), loc.toRecordId());
ASSERT_EQUALS(0, pos);
// Find the one before 'H'
int largePos;
DiskLoc largeLoc;
this->_helper.btree.locate(
- &txn, splitPoint, this->_helper.dummyDiskLoc, 1, &largePos, &largeLoc);
- this->_helper.btree.advance(&txn, &largeLoc, &largePos, -1);
+ &opCtx, splitPoint, this->_helper.dummyDiskLoc, 1, &largePos, &largeLoc);
+ this->_helper.btree.advance(&opCtx, &largeLoc, &largePos, -1);
// Find the one after 'H'
int smallPos;
DiskLoc smallLoc;
this->_helper.btree.locate(
- &txn, splitPoint, this->_helper.dummyDiskLoc, 1, &smallPos, &smallLoc);
- this->_helper.btree.advance(&txn, &smallLoc, &smallPos, 1);
+ &opCtx, splitPoint, this->_helper.dummyDiskLoc, 1, &smallPos, &smallLoc);
+ this->_helper.btree.advance(&opCtx, &smallLoc, &smallPos, 1);
ASSERT_NOT_EQUALS(smallLoc, largeLoc);
ASSERT_NOT_EQUALS(smallLoc, loc);
@@ -417,8 +418,8 @@ template <class OnDiskFormat>
class DontReuseUnused : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
for (int i = 0; i < 10; ++i) {
const BSONObj k = simpleKey('b' + 2 * i, 800);
@@ -437,8 +438,8 @@ template <class OnDiskFormat>
class MergeBucketsTestBase : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
for (int i = 0; i < 10; ++i) {
const BSONObj k = simpleKey('b' + 2 * i, 800);
@@ -453,7 +454,7 @@ public:
long long unusedCount = 0;
ASSERT_EQUALS(expectedCount,
- this->_helper.btree.fullValidate(&txn, &unusedCount, true, false, 0));
+ this->_helper.btree.fullValidate(&opCtx, &unusedCount, true, false, 0));
ASSERT_EQUALS(0, unusedCount);
}
@@ -493,8 +494,8 @@ template <class OnDiskFormat>
class MergeBucketsDontReplaceHead : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
for (int i = 0; i < 18; ++i) {
const BSONObj k = simpleKey('a' + i, 800);
@@ -509,7 +510,7 @@ public:
ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL) - 1);
long long unusedCount = 0;
- ASSERT_EQUALS(17, this->_helper.btree.fullValidate(&txn, &unusedCount, true, false, 0));
+ ASSERT_EQUALS(17, this->_helper.btree.fullValidate(&opCtx, &unusedCount, true, false, 0));
ASSERT_EQUALS(0, unusedCount);
}
};
@@ -518,11 +519,11 @@ template <class OnDiskFormat>
class MergeBucketsDelInternal : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{d:{b:{a:null},bb:null,_:{c:null}},_:{f:{e:null},_:{g:null}}}");
- ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
@@ -531,7 +532,7 @@ public:
<< "bb");
verify(this->unindex(k));
- ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
@@ -544,11 +545,11 @@ template <class OnDiskFormat>
class MergeBucketsRightNull : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{d:{b:{a:null},bb:null,cc:{c:null}},_:{f:{e:null},h:{g:null}}}");
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
@@ -557,7 +558,7 @@ public:
<< "bb");
verify(this->unindex(k));
- ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
@@ -573,12 +574,12 @@ template <class OnDiskFormat>
class DontMergeSingleBucket : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{d:{b:{a:null},c:null}}");
- ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
@@ -587,7 +588,7 @@ public:
<< "c");
verify(this->unindex(k));
- ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
@@ -600,12 +601,12 @@ template <class OnDiskFormat>
class ParentMergeNonRightToLeft : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{d:{b:{a:null},bb:null,cc:{c:null}},i:{f:{e:null},h:{g:null}}}");
- ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
@@ -614,7 +615,7 @@ public:
<< "bb");
verify(this->unindex(k));
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// Child does not currently replace parent in this case. Also, the tree
// has 6 buckets + 1 for the this->_helper.dummyDiskLoc.
@@ -628,12 +629,12 @@ template <class OnDiskFormat>
class ParentMergeNonRightToRight : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{d:{b:{a:null},cc:{c:null}},i:{f:{e:null},ff:null,h:{g:null}}}");
- ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
@@ -642,7 +643,7 @@ public:
<< "ff");
verify(this->unindex(k));
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// Child does not currently replace parent in this case. Also, the tree
// has 6 buckets + 1 for the this->_helper.dummyDiskLoc.
@@ -656,15 +657,15 @@ template <class OnDiskFormat>
class CantMergeRightNoMerge : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{d:{b:{a:null},bb:null,cc:{c:null}},"
"dd:null,"
"_:{f:{e:null},h:{g:null}}}");
- ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
@@ -673,7 +674,7 @@ public:
<< "bb");
verify(this->unindex(k));
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
@@ -689,12 +690,12 @@ template <class OnDiskFormat>
class CantMergeLeftNoMerge : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{c:{b:{a:null}},d:null,_:{f:{e:null},g:null}}");
- ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
@@ -703,7 +704,7 @@ public:
<< "g");
verify(this->unindex(k));
- ASSERT_EQUALS(6, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(6, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
@@ -716,12 +717,12 @@ template <class OnDiskFormat>
class MergeOption : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{c:{b:{a:null}},f:{e:{d:null},ee:null},_:{h:{g:null}}}");
- ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
@@ -730,7 +731,7 @@ public:
<< "ee");
verify(this->unindex(k));
- ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
@@ -743,12 +744,12 @@ template <class OnDiskFormat>
class ForceMergeLeft : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{c:{b:{a:null}},f:{e:{d:null},ee:null},ff:null,_:{h:{g:null}}}");
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
@@ -757,7 +758,7 @@ public:
<< "ee");
verify(this->unindex(k));
- ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
@@ -770,12 +771,12 @@ template <class OnDiskFormat>
class ForceMergeRight : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{c:{b:{a:null}},cc:null,f:{e:{d:null},ee:null},_:{h:{g:null}}}");
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
@@ -784,7 +785,7 @@ public:
<< "ee");
verify(this->unindex(k));
- ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
@@ -797,12 +798,12 @@ template <class OnDiskFormat>
class RecursiveMerge : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{h:{e:{b:{a:null},c:null,d:null},g:{f:null}},j:{i:null}}");
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
@@ -811,7 +812,7 @@ public:
<< "c");
verify(this->unindex(k));
- ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -825,12 +826,12 @@ template <class OnDiskFormat>
class RecursiveMergeRightBucket : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{h:{e:{b:{a:null},c:null,d:null},g:{f:null}},_:{i:null}}");
- ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
@@ -839,7 +840,7 @@ public:
<< "c");
verify(this->unindex(k));
- ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
@@ -852,12 +853,12 @@ template <class OnDiskFormat>
class RecursiveMergeDoubleRightBucket : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{h:{e:{b:{a:null},c:null,d:null},_:{f:null}},_:{i:null}}");
- ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
@@ -866,7 +867,7 @@ public:
<< "c");
verify(this->unindex(k));
- ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -882,20 +883,20 @@ public:
MergeSizeTestBase() : _count(0) {}
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
const BSONObj& topKey = biggestKey('m');
DiskLoc leftChild = this->newBucket();
builder.push(
- DiskLoc::fromRecordId(this->_helper.headManager.getHead(&txn)), topKey, leftChild);
+ DiskLoc::fromRecordId(this->_helper.headManager.getHead(&opCtx)), topKey, leftChild);
_count++;
DiskLoc rightChild = this->newBucket();
- this->setBucketNextChild(DiskLoc::fromRecordId(this->_helper.headManager.getHead(&txn)),
+ this->setBucketNextChild(DiskLoc::fromRecordId(this->_helper.headManager.getHead(&opCtx)),
rightChild);
_count += builder.fillBucketToExactSize(leftChild, leftSize(), 'a');
@@ -924,7 +925,8 @@ public:
const char* keys = delKeys();
for (const char* i = keys; *i; ++i) {
long long unused = 0;
- ASSERT_EQUALS(_count, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(_count,
+ this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
ASSERT_EQUALS(0, unused);
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
@@ -937,7 +939,7 @@ public:
}
long long unused = 0;
- ASSERT_EQUALS(_count, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(_count, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
ASSERT_EQUALS(0, unused);
validate();
@@ -1185,14 +1187,14 @@ protected:
}
virtual void initCheck() {
- OperationContextNoop txn;
- _oldTop = this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson();
+ OperationContextNoop opCtx;
+ _oldTop = this->getKey(this->_helper.headManager.getHead(&opCtx), 0).data.toBson();
}
virtual void validate() {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
ASSERT_BSONOBJ_NE(_oldTop,
- this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
+ this->getKey(this->_helper.headManager.getHead(&opCtx), 0).data.toBson());
}
private:
@@ -1212,14 +1214,14 @@ protected:
}
virtual void initCheck() {
- OperationContextNoop txn;
- _oldTop = this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson();
+ OperationContextNoop opCtx;
+ _oldTop = this->getKey(this->_helper.headManager.getHead(&opCtx), 0).data.toBson();
}
virtual void validate() {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
ASSERT_BSONOBJ_NE(_oldTop,
- this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
+ this->getKey(this->_helper.headManager.getHead(&opCtx), 0).data.toBson());
}
private:
@@ -1230,15 +1232,15 @@ template <class OnDiskFormat>
class BalanceOneLeftToRight : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},"
"b:{$20:null,$30:null,$40:null,$50:null,a:null},"
"_:{c:null}}");
- ASSERT_EQUALS(14, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(14, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -1246,7 +1248,7 @@ public:
const BSONObj k = BSON("" << bigNumString(0x40, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -1262,15 +1264,15 @@ template <class OnDiskFormat>
class BalanceOneRightToLeft : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{$10:{$1:null,$2:null,$3:null,$4:null},"
"b:{$20:null,$30:null,$40:null,$50:null,$60:null,$70:null},"
"_:{c:null}}");
- ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -1278,7 +1280,7 @@ public:
const BSONObj k = BSON("" << bigNumString(0x3, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -1294,8 +1296,8 @@ template <class OnDiskFormat>
class BalanceThreeLeftToRight : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{$20:{$1:{$0:null},$3:{$2:null},$5:{$4:null},$7:{$6:null},"
@@ -1303,7 +1305,7 @@ public:
"b:{$30:null,$40:{$35:null},$50:{$45:null}},"
"_:{c:null}}");
- ASSERT_EQUALS(23, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(23, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 14 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(15, this->_helper.recordStore.numRecords(NULL));
@@ -1311,7 +1313,7 @@ public:
const BSONObj k = BSON("" << bigNumString(0x30, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(22, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(22, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 14 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(15, this->_helper.recordStore.numRecords(NULL));
@@ -1329,8 +1331,8 @@ template <class OnDiskFormat>
class BalanceThreeRightToLeft : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{$20:{$1:{$0:null},$3:{$2:null},$5:null,_:{$14:null}},"
@@ -1339,7 +1341,7 @@ public:
"$90:{$85:null},$100:{$95:null}},"
"_:{c:null}}");
- ASSERT_EQUALS(25, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(25, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 15 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(16, this->_helper.recordStore.numRecords(NULL));
@@ -1347,7 +1349,7 @@ public:
const BSONObj k = BSON("" << bigNumString(0x5, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(24, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(24, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 15 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(16, this->_helper.recordStore.numRecords(NULL));
@@ -1365,14 +1367,14 @@ template <class OnDiskFormat>
class BalanceSingleParentKey : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},"
"_:{$20:null,$30:null,$40:null,$50:null,a:null}}");
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
@@ -1380,7 +1382,7 @@ public:
const BSONObj k = BSON("" << bigNumString(0x40, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
@@ -1395,8 +1397,8 @@ template <class OnDiskFormat>
class PackEmptyBucket : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{a:null}");
@@ -1404,7 +1406,7 @@ public:
<< "a");
ASSERT(this->unindex(k));
- this->forcePackBucket(this->_helper.headManager.getHead(&txn));
+ this->forcePackBucket(this->_helper.headManager.getHead(&opCtx));
typename BtreeLogicTestBase<OnDiskFormat>::BucketType* headBucket = this->head();
@@ -1425,8 +1427,8 @@ template <class OnDiskFormat>
class PackedDataSizeEmptyBucket : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{a:null}");
@@ -1434,7 +1436,7 @@ public:
<< "a");
ASSERT(this->unindex(k));
- this->forcePackBucket(this->_helper.headManager.getHead(&txn));
+ this->forcePackBucket(this->_helper.headManager.getHead(&opCtx));
typename BtreeLogicTestBase<OnDiskFormat>::BucketType* headBucket = this->head();
@@ -1449,25 +1451,25 @@ template <class OnDiskFormat>
class BalanceSingleParentKeyPackParent : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},"
"_:{$20:null,$30:null,$40:null,$50:null,a:null}}");
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
// force parent pack
- this->forcePackBucket(this->_helper.headManager.getHead(&txn));
+ this->forcePackBucket(this->_helper.headManager.getHead(&opCtx));
const BSONObj k = BSON("" << bigNumString(0x40, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
@@ -1482,8 +1484,8 @@ template <class OnDiskFormat>
class BalanceSplitParent : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{$10$10:{$1:null,$2:null,$3:null,$4:null},"
@@ -1491,7 +1493,7 @@ public:
"$200:null,$300:null,$400:null,$500:null,$600:null,"
"$700:null,$800:null,$900:null,_:{c:null}}");
- ASSERT_EQUALS(22, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(22, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -1499,7 +1501,7 @@ public:
const BSONObj k = BSON("" << bigNumString(0x3, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(21, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(21, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
@@ -1516,15 +1518,15 @@ template <class OnDiskFormat>
class RebalancedSeparatorBase : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(treeSpec());
modTree();
ASSERT_EQUALS(
expectedSeparator(),
- this->bucketRebalancedSeparatorPos(this->_helper.headManager.getHead(&txn), 0));
+ this->bucketRebalancedSeparatorPos(this->_helper.headManager.getHead(&opCtx), 0));
}
virtual string treeSpec() const = 0;
@@ -1658,14 +1660,14 @@ class NoMoveAtLowWaterMarkRight : public MergeSizeJustRightRight<OnDiskFormat> {
}
virtual void initCheck() {
- OperationContextNoop txn;
- _oldTop = this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson();
+ OperationContextNoop opCtx;
+ _oldTop = this->getKey(this->_helper.headManager.getHead(&opCtx), 0).data.toBson();
}
virtual void validate() {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
ASSERT_BSONOBJ_EQ(_oldTop,
- this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
+ this->getKey(this->_helper.headManager.getHead(&opCtx), 0).data.toBson());
}
virtual bool merge() const {
@@ -1686,10 +1688,10 @@ class MoveBelowLowWaterMarkRight : public NoMoveAtLowWaterMarkRight<OnDiskFormat
}
virtual void validate() {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
// Different top means we rebalanced
ASSERT_BSONOBJ_NE(this->_oldTop,
- this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
+ this->getKey(this->_helper.headManager.getHead(&opCtx), 0).data.toBson());
}
};
@@ -1699,14 +1701,14 @@ class NoMoveAtLowWaterMarkLeft : public MergeSizeJustRightLeft<OnDiskFormat> {
return MergeSizeJustRightLeft<OnDiskFormat>::leftSize() + 1;
}
virtual void initCheck() {
- OperationContextNoop txn;
- this->_oldTop = this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson();
+ OperationContextNoop opCtx;
+ this->_oldTop = this->getKey(this->_helper.headManager.getHead(&opCtx), 0).data.toBson();
}
virtual void validate() {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
ASSERT_BSONOBJ_EQ(this->_oldTop,
- this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
+ this->getKey(this->_helper.headManager.getHead(&opCtx), 0).data.toBson());
}
virtual bool merge() const {
return false;
@@ -1726,10 +1728,10 @@ class MoveBelowLowWaterMarkLeft : public NoMoveAtLowWaterMarkLeft<OnDiskFormat>
}
virtual void validate() {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
// Different top means we rebalanced
ASSERT_BSONOBJ_NE(this->_oldTop,
- this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
+ this->getKey(this->_helper.headManager.getHead(&opCtx), 0).data.toBson());
}
};
@@ -1737,15 +1739,15 @@ template <class OnDiskFormat>
class PreferBalanceLeft : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},"
"$20:{$11:null,$12:null,$13:null,$14:null},"
"_:{$30:null}}");
- ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -1753,7 +1755,7 @@ public:
const BSONObj k = BSON("" << bigNumString(0x12, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -1769,15 +1771,15 @@ template <class OnDiskFormat>
class PreferBalanceRight : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{$10:{$1:null},"
"$20:{$11:null,$12:null,$13:null,$14:null},"
"_:{$31:null,$32:null,$33:null,$34:null,$35:null,$36:null}}");
- ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -1785,7 +1787,7 @@ public:
const BSONObj k = BSON("" << bigNumString(0x12, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -1801,15 +1803,15 @@ template <class OnDiskFormat>
class RecursiveMergeThenBalance : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{$10:{$5:{$1:null,$2:null},$8:{$6:null,$7:null}},"
"_:{$20:null,$30:null,$40:null,$50:null,"
"$60:null,$70:null,$80:null,$90:null}}");
- ASSERT_EQUALS(15, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(15, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
@@ -1817,7 +1819,7 @@ public:
const BSONObj k = BSON("" << bigNumString(0x7, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(14, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(14, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -1832,12 +1834,12 @@ template <class OnDiskFormat>
class DelEmptyNoNeighbors : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{b:{a:null}}");
- ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
@@ -1846,7 +1848,7 @@ public:
<< "a");
ASSERT(this->unindex(k));
- ASSERT_EQUALS(1, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(1, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
@@ -1859,12 +1861,12 @@ template <class OnDiskFormat>
class DelEmptyEmptyNeighbors : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{a:null,c:{b:null},d:null}");
- ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
@@ -1873,7 +1875,7 @@ public:
<< "b");
ASSERT(this->unindex(k));
- ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
@@ -1886,13 +1888,13 @@ template <class OnDiskFormat>
class DelInternal : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{a:null,c:{b:null},d:null}");
long long unused = 0;
- ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
@@ -1902,7 +1904,7 @@ public:
<< "c");
ASSERT(this->unindex(k));
- ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
@@ -1916,17 +1918,17 @@ template <class OnDiskFormat>
class DelInternalReplaceWithUnused : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{a:null,c:{b:null},d:null}");
const DiskLoc prevChildBucket =
- this->getKey(this->_helper.headManager.getHead(&txn), 1).prevChildBucket;
+ this->getKey(this->_helper.headManager.getHead(&opCtx), 1).prevChildBucket;
this->markKeyUnused(prevChildBucket, 0);
long long unused = 0;
- ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
@@ -1937,7 +1939,7 @@ public:
ASSERT(this->unindex(k));
unused = 0;
- ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
@@ -1952,13 +1954,13 @@ template <class OnDiskFormat>
class DelInternalReplaceRight : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{a:null,_:{b:null}}");
long long unused = 0;
- ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
@@ -1969,7 +1971,7 @@ public:
ASSERT(this->unindex(k));
unused = 0;
- ASSERT_EQUALS(1, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(1, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
@@ -1983,13 +1985,13 @@ template <class OnDiskFormat>
class DelInternalPromoteKey : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{a:null,y:{d:{c:{b:null}},_:{e:null}},z:null}");
long long unused = 0;
- ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
@@ -2000,7 +2002,7 @@ public:
ASSERT(this->unindex(k));
unused = 0;
- ASSERT_EQUALS(6, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(6, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
@@ -2014,13 +2016,13 @@ template <class OnDiskFormat>
class DelInternalPromoteRightKey : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{a:null,_:{e:{c:null},_:{f:null}}}");
long long unused = 0;
- ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -2031,7 +2033,7 @@ public:
ASSERT(this->unindex(k));
unused = 0;
- ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
@@ -2045,13 +2047,13 @@ template <class OnDiskFormat>
class DelInternalReplacementPrevNonNull : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{a:null,d:{c:{b:null}},e:null}");
long long unused = 0;
- ASSERT_EQUALS(5, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(5, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
@@ -2061,7 +2063,7 @@ public:
<< "d");
ASSERT(this->unindex(k));
- ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
@@ -2070,7 +2072,7 @@ public:
builder.checkStructure("{a:null,d:{c:{b:null}},e:null}");
// Check 'unused' key
- ASSERT(this->getKey(this->_helper.headManager.getHead(&txn), 1).recordLoc.getOfs() & 1);
+ ASSERT(this->getKey(this->_helper.headManager.getHead(&opCtx), 1).recordLoc.getOfs() & 1);
}
};
@@ -2078,13 +2080,13 @@ template <class OnDiskFormat>
class DelInternalReplacementNextNonNull : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{a:null,_:{c:null,_:{d:null}}}");
long long unused = 0;
- ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
@@ -2094,7 +2096,7 @@ public:
<< "a");
ASSERT(this->unindex(k));
- ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
@@ -2103,7 +2105,7 @@ public:
builder.checkStructure("{a:null,_:{c:null,_:{d:null}}}");
// Check 'unused' key
- ASSERT(this->getKey(this->_helper.headManager.getHead(&txn), 0).recordLoc.getOfs() & 1);
+ ASSERT(this->getKey(this->_helper.headManager.getHead(&opCtx), 0).recordLoc.getOfs() & 1);
}
};
@@ -2111,15 +2113,15 @@ template <class OnDiskFormat>
class DelInternalSplitPromoteLeft : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{$10:null,$20:null,$30$10:{$25:{$23:null},_:{$27:null}},"
"$40:null,$50:null,$60:null,$70:null,$80:null,$90:null,$100:null}");
long long unused = 0;
- ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -2128,7 +2130,7 @@ public:
const BSONObj k = BSON("" << bigNumString(0x30, 0x10));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -2145,15 +2147,15 @@ template <class OnDiskFormat>
class DelInternalSplitPromoteRight : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{$10:null,$20:null,$30:null,$40:null,$50:null,$60:null,$70:null,"
"$80:null,$90:null,$100$10:{$95:{$93:null},_:{$97:null}}}");
long long unused = 0;
- ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -2162,7 +2164,7 @@ public:
const BSONObj k = BSON("" << bigNumString(0x100, 0x10));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -2178,8 +2180,8 @@ template <class OnDiskFormat>
class LocateEmptyForward : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
BSONObj key1 = simpleKey('a');
this->insert(key1, this->_helper.dummyDiskLoc);
@@ -2189,7 +2191,7 @@ public:
this->insert(key3, this->_helper.dummyDiskLoc);
this->checkValidNumKeys(3);
- this->locate(BSONObj(), 0, false, this->_helper.headManager.getHead(&txn), 1);
+ this->locate(BSONObj(), 0, false, this->_helper.headManager.getHead(&opCtx), 1);
}
};
@@ -2197,8 +2199,8 @@ template <class OnDiskFormat>
class LocateEmptyReverse : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
BSONObj key1 = simpleKey('a');
this->insert(key1, this->_helper.dummyDiskLoc);
@@ -2216,27 +2218,27 @@ template <class OnDiskFormat>
class DuplicateKeys : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
BSONObj key1 = simpleKey('z');
ASSERT_OK(this->insert(key1, this->_helper.dummyDiskLoc, true));
this->checkValidNumKeys(1);
- this->locate(key1, 0, true, this->_helper.headManager.getHead(&txn), 1);
+ this->locate(key1, 0, true, this->_helper.headManager.getHead(&opCtx), 1);
// Attempt to insert a dup key/value, which is okay.
ASSERT_EQUALS(Status::OK(), this->insert(key1, this->_helper.dummyDiskLoc, true));
this->checkValidNumKeys(1);
- this->locate(key1, 0, true, this->_helper.headManager.getHead(&txn), 1);
+ this->locate(key1, 0, true, this->_helper.headManager.getHead(&opCtx), 1);
// Attempt to insert a dup key/value with dupsAllowed=false.
ASSERT_EQUALS(ErrorCodes::DuplicateKeyValue,
this->insert(key1, this->_helper.dummyDiskLoc, false));
this->checkValidNumKeys(1);
- this->locate(key1, 0, true, this->_helper.headManager.getHead(&txn), 1);
+ this->locate(key1, 0, true, this->_helper.headManager.getHead(&opCtx), 1);
// Add another record to produce another diskloc.
- StatusWith<RecordId> s = this->_helper.recordStore.insertRecord(&txn, "a", 1, false);
+ StatusWith<RecordId> s = this->_helper.recordStore.insertRecord(&opCtx, "a", 1, false);
ASSERT_TRUE(s.isOK());
ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
@@ -2252,7 +2254,7 @@ public:
this->checkValidNumKeys(2);
// Clean up.
- this->_helper.recordStore.deleteRecord(&txn, s.getValue());
+ this->_helper.recordStore.deleteRecord(&opCtx, s.getValue());
ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
}
};
@@ -2330,14 +2332,14 @@ public:
}
long long unused = 0;
- ASSERT_EQUALS( 0, bt()->fullValidate(&txn, dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, bt()->fullValidate(&opCtx, dl(), order(), &unused, true ) );
for ( long long i = 50000; i < 50100; ++i ) {
insert( i );
}
long long unused2 = 0;
- ASSERT_EQUALS( 100, bt()->fullValidate(&txn, dl(), order(), &unused2, true ) );
+ ASSERT_EQUALS( 100, bt()->fullValidate(&opCtx, dl(), order(), &unused2, true ) );
// log() << "old unused: " << unused << ", new unused: " << unused2 << endl;
//
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_test_help.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_test_help.cpp
index 663075e5cb8..e02a01923f7 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_test_help.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_test_help.cpp
@@ -73,9 +73,9 @@ BtreeLogicTestHelper<OnDiskFormat>::BtreeLogicTestHelper(const BSONObj& order)
// Generate a valid record location for a "fake" record, which we will repeatedly use
// thoughout the tests.
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
StatusWith<RecordId> s =
- recordStore.insertRecord(&txn, randomData.c_str(), randomData.length(), false);
+ recordStore.insertRecord(&opCtx, randomData.c_str(), randomData.length(), false);
ASSERT_TRUE(s.isOK());
ASSERT_EQUALS(1, recordStore.numRecords(NULL));
@@ -90,13 +90,13 @@ BtreeLogicTestHelper<OnDiskFormat>::BtreeLogicTestHelper(const BSONObj& order)
template <class OnDiskFormat>
void ArtificialTreeBuilder<OnDiskFormat>::makeTree(const string& spec) {
- _helper->headManager.setHead(_txn, makeTree(fromjson(spec)).toRecordId());
+ _helper->headManager.setHead(_opCtx, makeTree(fromjson(spec)).toRecordId());
}
template <class OnDiskFormat>
DiskLoc ArtificialTreeBuilder<OnDiskFormat>::makeTree(const BSONObj& spec) {
- DiskLoc bucketLoc = _helper->btree._addBucket(_txn);
- BucketType* bucket = _helper->btree.getBucket(_txn, bucketLoc);
+ DiskLoc bucketLoc = _helper->btree._addBucket(_opCtx);
+ BucketType* bucket = _helper->btree.getBucket(_opCtx, bucketLoc);
BSONObjIterator i(spec);
while (i.more()) {
@@ -114,13 +114,13 @@ DiskLoc ArtificialTreeBuilder<OnDiskFormat>::makeTree(const BSONObj& spec) {
}
}
- _helper->btree.fixParentPtrs(_txn, bucket, bucketLoc);
+ _helper->btree.fixParentPtrs(_opCtx, bucket, bucketLoc);
return bucketLoc;
}
template <class OnDiskFormat>
void ArtificialTreeBuilder<OnDiskFormat>::checkStructure(const string& spec) const {
- checkStructure(fromjson(spec), DiskLoc::fromRecordId(_helper->headManager.getHead(_txn)));
+ checkStructure(fromjson(spec), DiskLoc::fromRecordId(_helper->headManager.getHead(_opCtx)));
}
template <class OnDiskFormat>
@@ -128,16 +128,16 @@ void ArtificialTreeBuilder<OnDiskFormat>::push(const DiskLoc bucketLoc,
const BSONObj& key,
const DiskLoc child) {
KeyDataOwnedType k(key);
- BucketType* bucket = _helper->btree.getBucket(_txn, bucketLoc);
+ BucketType* bucket = _helper->btree.getBucket(_opCtx, bucketLoc);
invariant(_helper->btree.pushBack(bucket, _helper->dummyDiskLoc, k, child));
- _helper->btree.fixParentPtrs(_txn, bucket, bucketLoc);
+ _helper->btree.fixParentPtrs(_opCtx, bucket, bucketLoc);
}
template <class OnDiskFormat>
void ArtificialTreeBuilder<OnDiskFormat>::checkStructure(const BSONObj& spec,
const DiskLoc node) const {
- BucketType* bucket = _helper->btree.getBucket(_txn, node);
+ BucketType* bucket = _helper->btree.getBucket(_opCtx, node);
BSONObjIterator j(spec);
for (int i = 0; i < bucket->n; ++i) {
@@ -172,8 +172,8 @@ template <class OnDiskFormat>
bool ArtificialTreeBuilder<OnDiskFormat>::isPresent(const BSONObj& key, int direction) const {
int pos;
DiskLoc loc;
- OperationContextNoop txn;
- return _helper->btree.locate(&txn, key, _helper->dummyDiskLoc, direction, &pos, &loc);
+ OperationContextNoop opCtx;
+ return _helper->btree.locate(&opCtx, key, _helper->dummyDiskLoc, direction, &pos, &loc);
}
// Static
@@ -200,7 +200,7 @@ int ArtificialTreeBuilder<OnDiskFormat>::fillBucketToExactSize(const DiskLoc buc
char startKey) {
ASSERT_FALSE(bucketLoc.isNull());
- BucketType* bucket = _helper->btree.getBucket(_txn, bucketLoc);
+ BucketType* bucket = _helper->btree.getBucket(_opCtx, bucketLoc);
ASSERT_EQUALS(0, bucket->n);
static const int bigSize = KeyDataOwnedType(simpleKey('a', 801)).dataSize();
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_test_help.h b/src/mongo/db/storage/mmap_v1/btree/btree_test_help.h
index 5aeec516528..c5d48b48b3a 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_test_help.h
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_test_help.h
@@ -53,11 +53,11 @@ BSONObj simpleKey(char c, int n = 1);
*/
class TestHeadManager : public HeadManager {
public:
- virtual const RecordId getHead(OperationContext* txn) const {
+ virtual const RecordId getHead(OperationContext* opCtx) const {
return _head;
}
- virtual void setHead(OperationContext* txn, const RecordId newHead) {
+ virtual void setHead(OperationContext* opCtx, const RecordId newHead) {
_head = newHead;
}
@@ -100,8 +100,8 @@ public:
* does not do any cleanup, so constructing multiple trees over the same helper will
* cause leaked records.
*/
- ArtificialTreeBuilder(OperationContext* txn, BtreeLogicTestHelper<OnDiskFormat>* helper)
- : _txn(txn), _helper(helper) {}
+ ArtificialTreeBuilder(OperationContext* opCtx, BtreeLogicTestHelper<OnDiskFormat>* helper)
+ : _opCtx(opCtx), _helper(helper) {}
/**
* Causes the specified tree shape to be built on the associated helper and the tree's
@@ -143,7 +143,7 @@ private:
static std::string expectedKey(const char* spec);
- OperationContext* _txn;
+ OperationContext* _opCtx;
BtreeLogicTestHelper<OnDiskFormat>* _helper;
};
diff --git a/src/mongo/db/storage/mmap_v1/catalog/hashtab.h b/src/mongo/db/storage/mmap_v1/catalog/hashtab.h
index e22453b99db..f873e6a4d3a 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/hashtab.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/hashtab.h
@@ -61,25 +61,25 @@ public:
return 0;
}
- void kill(OperationContext* txn, const Namespace& k) {
+ void kill(OperationContext* opCtx, const Namespace& k) {
bool found;
int i = _find(k, found);
if (i >= 0 && found) {
Node* n = &_nodes(i);
- n = txn->recoveryUnit()->writing(n);
+ n = opCtx->recoveryUnit()->writing(n);
n->key.kill();
n->setUnused();
}
}
/** returns false if too full */
- bool put(OperationContext* txn, const Namespace& k, const NamespaceDetails& value) {
+ bool put(OperationContext* opCtx, const Namespace& k, const NamespaceDetails& value) {
bool found;
int i = _find(k, found);
if (i < 0)
return false;
- Node* n = txn->recoveryUnit()->writing(&_nodes(i));
+ Node* n = opCtx->recoveryUnit()->writing(&_nodes(i));
if (!found) {
n->key = k;
n->hash = k.hash();
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
index d7f19c49f16..293fa482c41 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
@@ -82,13 +82,13 @@ NamespaceDetails::NamespaceDetails(const DiskLoc& loc, bool capped) {
memset(_reserved, 0, sizeof(_reserved));
}
-NamespaceDetails::Extra* NamespaceDetails::allocExtra(OperationContext* txn,
+NamespaceDetails::Extra* NamespaceDetails::allocExtra(OperationContext* opCtx,
StringData ns,
NamespaceIndex& ni,
int nindexessofar) {
// Namespace details must always be changed under an exclusive DB lock
const NamespaceString nss(ns);
- invariant(txn->lockState()->isDbLockedForMode(nss.db(), MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(nss.db(), MODE_X));
int i = (nindexessofar - NIndexesBase) / NIndexesExtra;
verify(i >= 0 && i <= 1);
@@ -101,18 +101,18 @@ NamespaceDetails::Extra* NamespaceDetails::allocExtra(OperationContext* txn,
Extra temp;
temp.init();
- ni.add_ns(txn, extrans, reinterpret_cast<NamespaceDetails*>(&temp));
+ ni.add_ns(opCtx, extrans, reinterpret_cast<NamespaceDetails*>(&temp));
Extra* e = reinterpret_cast<NamespaceDetails::Extra*>(ni.details(extrans));
long ofs = e->ofsFrom(this);
if (i == 0) {
verify(_extraOffset == 0);
- *txn->recoveryUnit()->writing(&_extraOffset) = ofs;
+ *opCtx->recoveryUnit()->writing(&_extraOffset) = ofs;
verify(extra() == e);
} else {
Extra* hd = extra();
verify(hd->next(this) == 0);
- hd->setNext(txn, ofs);
+ hd->setNext(opCtx, ofs);
}
return e;
}
@@ -176,7 +176,7 @@ NamespaceDetails::IndexIterator::IndexIterator(const NamespaceDetails* _d,
}
// must be called when renaming a NS to fix up extra
-void NamespaceDetails::copyingFrom(OperationContext* txn,
+void NamespaceDetails::copyingFrom(OperationContext* opCtx,
StringData thisns,
NamespaceIndex& ni,
NamespaceDetails* src) {
@@ -184,35 +184,35 @@ void NamespaceDetails::copyingFrom(OperationContext* txn,
Extra* se = src->extra();
int n = NIndexesBase;
if (se) {
- Extra* e = allocExtra(txn, thisns, ni, n);
+ Extra* e = allocExtra(opCtx, thisns, ni, n);
while (1) {
n += NIndexesExtra;
e->copy(this, *se);
se = se->next(src);
if (se == 0)
break;
- Extra* nxt = allocExtra(txn, thisns, ni, n);
- e->setNext(txn, nxt->ofsFrom(this));
+ Extra* nxt = allocExtra(opCtx, thisns, ni, n);
+ e->setNext(opCtx, nxt->ofsFrom(this));
e = nxt;
}
verify(_extraOffset);
}
}
-NamespaceDetails* NamespaceDetails::writingWithoutExtra(OperationContext* txn) {
- return txn->recoveryUnit()->writing(this);
+NamespaceDetails* NamespaceDetails::writingWithoutExtra(OperationContext* opCtx) {
+ return opCtx->recoveryUnit()->writing(this);
}
// XXX - this method should go away
-NamespaceDetails* NamespaceDetails::writingWithExtra(OperationContext* txn) {
+NamespaceDetails* NamespaceDetails::writingWithExtra(OperationContext* opCtx) {
for (Extra* e = extra(); e; e = e->next(this)) {
- txn->recoveryUnit()->writing(e);
+ opCtx->recoveryUnit()->writing(e);
}
- return writingWithoutExtra(txn);
+ return writingWithoutExtra(opCtx);
}
-void NamespaceDetails::setMaxCappedDocs(OperationContext* txn, long long max) {
+void NamespaceDetails::setMaxCappedDocs(OperationContext* opCtx, long long max) {
massert(16499,
"max in a capped collection has to be < 2^31 or -1",
CollectionOptions::validMaxCappedDocs(&max));
@@ -222,21 +222,21 @@ void NamespaceDetails::setMaxCappedDocs(OperationContext* txn, long long max) {
/* ------------------------------------------------------------------------- */
-int NamespaceDetails::_catalogFindIndexByName(OperationContext* txn,
+int NamespaceDetails::_catalogFindIndexByName(OperationContext* opCtx,
const Collection* coll,
StringData name,
bool includeBackgroundInProgress) const {
IndexIterator i = ii(includeBackgroundInProgress);
while (i.more()) {
- const BSONObj obj = coll->docFor(txn, i.next().info.toRecordId()).value();
+ const BSONObj obj = coll->docFor(opCtx, i.next().info.toRecordId()).value();
if (name == obj.getStringField("name"))
return i.pos() - 1;
}
return -1;
}
-void NamespaceDetails::Extra::setNext(OperationContext* txn, long ofs) {
- *txn->recoveryUnit()->writing(&_next) = ofs;
+void NamespaceDetails::Extra::setNext(OperationContext* opCtx, long ofs) {
+ *opCtx->recoveryUnit()->writing(&_next) = ofs;
}
} // namespace mongo
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
index f1196bcd166..cf82703a25d 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
@@ -153,7 +153,7 @@ public:
return 0;
return (Extra*)(((char*)d) + _next);
}
- void setNext(OperationContext* txn, long ofs);
+ void setNext(OperationContext* opCtx, long ofs);
void copy(NamespaceDetails* d, const Extra& e) {
memcpy(this, &e, sizeof(Extra));
_next = 0;
@@ -165,15 +165,18 @@ public:
return (Extra*)(((char*)this) + _extraOffset);
}
/* add extra space for indexes when more than 10 */
- Extra* allocExtra(OperationContext* txn, StringData ns, NamespaceIndex& ni, int nindexessofar);
+ Extra* allocExtra(OperationContext* opCtx,
+ StringData ns,
+ NamespaceIndex& ni,
+ int nindexessofar);
- void copyingFrom(OperationContext* txn,
+ void copyingFrom(OperationContext* opCtx,
StringData thisns,
NamespaceIndex& ni,
NamespaceDetails* src); // must be called when renaming a NS to fix up extra
public:
- void setMaxCappedDocs(OperationContext* txn, long long max);
+ void setMaxCappedDocs(OperationContext* opCtx, long long max);
enum UserFlags {
Flag_UsePowerOf2Sizes = 1 << 0,
@@ -210,12 +213,12 @@ public:
* This fetches the IndexDetails for the next empty index slot. The caller must populate
* returned object. This handles allocating extra index space, if necessary.
*/
- IndexDetails& getNextIndexDetails(OperationContext* txn, Collection* collection);
+ IndexDetails& getNextIndexDetails(OperationContext* opCtx, Collection* collection);
- NamespaceDetails* writingWithoutExtra(OperationContext* txn);
+ NamespaceDetails* writingWithoutExtra(OperationContext* opCtx);
/** Make all linked Extra objects writeable as well */
- NamespaceDetails* writingWithExtra(OperationContext* txn);
+ NamespaceDetails* writingWithExtra(OperationContext* opCtx);
/**
* Returns the offset of the specified index name within the array of indexes. Must be
@@ -223,7 +226,7 @@ public:
*
* @return > 0 if index name was found, -1 otherwise.
*/
- int _catalogFindIndexByName(OperationContext* txn,
+ int _catalogFindIndexByName(OperationContext* opCtx,
const Collection* coll,
StringData name,
bool includeBackgroundInProgress) const;
@@ -234,7 +237,7 @@ private:
* a and b are 2 index ids, whose contents will be swapped
* must have a lock on the entire collection to do this
*/
- void swapIndex(OperationContext* txn, int a, int b);
+ void swapIndex(OperationContext* opCtx, int a, int b);
friend class IndexCatalog;
friend class IndexCatalogEntry;
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp
index 7d92f431db8..ee2031d4a7e 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp
@@ -65,8 +65,8 @@ NamespaceDetailsCollectionCatalogEntry::NamespaceDetailsCollectionCatalogEntry(
}
CollectionOptions NamespaceDetailsCollectionCatalogEntry::getCollectionOptions(
- OperationContext* txn) const {
- CollectionOptions options = _db->getCollectionOptions(txn, _namespacesRecordId);
+ OperationContext* opCtx) const {
+ CollectionOptions options = _db->getCollectionOptions(opCtx, _namespacesRecordId);
if (options.flagsSet) {
if (options.flags != _details->userFlags) {
@@ -84,11 +84,11 @@ CollectionOptions NamespaceDetailsCollectionCatalogEntry::getCollectionOptions(
return options;
}
-int NamespaceDetailsCollectionCatalogEntry::getTotalIndexCount(OperationContext* txn) const {
+int NamespaceDetailsCollectionCatalogEntry::getTotalIndexCount(OperationContext* opCtx) const {
return _details->nIndexes + _details->indexBuildsInProgress;
}
-int NamespaceDetailsCollectionCatalogEntry::getCompletedIndexCount(OperationContext* txn) const {
+int NamespaceDetailsCollectionCatalogEntry::getCompletedIndexCount(OperationContext* opCtx) const {
return _details->nIndexes;
}
@@ -96,22 +96,22 @@ int NamespaceDetailsCollectionCatalogEntry::getMaxAllowedIndexes() const {
return NamespaceDetails::NIndexesMax;
}
-void NamespaceDetailsCollectionCatalogEntry::getAllIndexes(OperationContext* txn,
+void NamespaceDetailsCollectionCatalogEntry::getAllIndexes(OperationContext* opCtx,
std::vector<std::string>* names) const {
NamespaceDetails::IndexIterator i = _details->ii(true);
while (i.more()) {
const IndexDetails& id = i.next();
- const BSONObj obj = _indexRecordStore->dataFor(txn, id.info.toRecordId()).toBson();
+ const BSONObj obj = _indexRecordStore->dataFor(opCtx, id.info.toRecordId()).toBson();
names->push_back(obj.getStringField("name"));
}
}
-bool NamespaceDetailsCollectionCatalogEntry::isIndexMultikey(OperationContext* txn,
+bool NamespaceDetailsCollectionCatalogEntry::isIndexMultikey(OperationContext* opCtx,
StringData idxName,
MultikeyPaths* multikeyPaths) const {
// TODO SERVER-22727: Populate 'multikeyPaths' with path components that cause 'idxName' to be
// multikey.
- int idxNo = _findIndexNumber(txn, idxName);
+ int idxNo = _findIndexNumber(opCtx, idxName);
invariant(idxNo >= 0);
return isIndexMultikey(idxNo);
}
@@ -121,16 +121,16 @@ bool NamespaceDetailsCollectionCatalogEntry::isIndexMultikey(int idxNo) const {
}
bool NamespaceDetailsCollectionCatalogEntry::setIndexIsMultikey(
- OperationContext* txn, StringData indexName, const MultikeyPaths& multikeyPaths) {
+ OperationContext* opCtx, StringData indexName, const MultikeyPaths& multikeyPaths) {
// TODO SERVER-22727: Store new path components from 'multikeyPaths' that cause 'indexName' to
// be multikey.
- int idxNo = _findIndexNumber(txn, indexName);
+ int idxNo = _findIndexNumber(opCtx, indexName);
invariant(idxNo >= 0);
const bool multikey = true;
- return setIndexIsMultikey(txn, idxNo, multikey);
+ return setIndexIsMultikey(opCtx, idxNo, multikey);
}
-bool NamespaceDetailsCollectionCatalogEntry::setIndexIsMultikey(OperationContext* txn,
+bool NamespaceDetailsCollectionCatalogEntry::setIndexIsMultikey(OperationContext* opCtx,
int idxNo,
bool multikey) {
unsigned long long mask = 1ULL << idxNo;
@@ -141,7 +141,7 @@ bool NamespaceDetailsCollectionCatalogEntry::setIndexIsMultikey(OperationContext
return false;
}
- *txn->recoveryUnit()->writing(&_details->multiKeyIndexBits) |= mask;
+ *opCtx->recoveryUnit()->writing(&_details->multiKeyIndexBits) |= mask;
} else {
// Shortcut if the bit is already set correctly
if (!(_details->multiKeyIndexBits & mask)) {
@@ -150,49 +150,49 @@ bool NamespaceDetailsCollectionCatalogEntry::setIndexIsMultikey(OperationContext
// Invert mask: all 1's except a 0 at the ith bit
mask = ~mask;
- *txn->recoveryUnit()->writing(&_details->multiKeyIndexBits) &= mask;
+ *opCtx->recoveryUnit()->writing(&_details->multiKeyIndexBits) &= mask;
}
return true;
}
-RecordId NamespaceDetailsCollectionCatalogEntry::getIndexHead(OperationContext* txn,
+RecordId NamespaceDetailsCollectionCatalogEntry::getIndexHead(OperationContext* opCtx,
StringData idxName) const {
- int idxNo = _findIndexNumber(txn, idxName);
+ int idxNo = _findIndexNumber(opCtx, idxName);
invariant(idxNo >= 0);
return _details->idx(idxNo).head.toRecordId();
}
-BSONObj NamespaceDetailsCollectionCatalogEntry::getIndexSpec(OperationContext* txn,
+BSONObj NamespaceDetailsCollectionCatalogEntry::getIndexSpec(OperationContext* opCtx,
StringData idxName) const {
- int idxNo = _findIndexNumber(txn, idxName);
+ int idxNo = _findIndexNumber(opCtx, idxName);
invariant(idxNo >= 0);
const IndexDetails& id = _details->idx(idxNo);
- return _indexRecordStore->dataFor(txn, id.info.toRecordId()).toBson();
+ return _indexRecordStore->dataFor(opCtx, id.info.toRecordId()).toBson();
}
-void NamespaceDetailsCollectionCatalogEntry::setIndexHead(OperationContext* txn,
+void NamespaceDetailsCollectionCatalogEntry::setIndexHead(OperationContext* opCtx,
StringData idxName,
const RecordId& newHead) {
- int idxNo = _findIndexNumber(txn, idxName);
+ int idxNo = _findIndexNumber(opCtx, idxName);
invariant(idxNo >= 0);
- *txn->recoveryUnit()->writing(&_details->idx(idxNo).head) = DiskLoc::fromRecordId(newHead);
+ *opCtx->recoveryUnit()->writing(&_details->idx(idxNo).head) = DiskLoc::fromRecordId(newHead);
}
-bool NamespaceDetailsCollectionCatalogEntry::isIndexReady(OperationContext* txn,
+bool NamespaceDetailsCollectionCatalogEntry::isIndexReady(OperationContext* opCtx,
StringData idxName) const {
- int idxNo = _findIndexNumber(txn, idxName);
+ int idxNo = _findIndexNumber(opCtx, idxName);
invariant(idxNo >= 0);
- return idxNo < getCompletedIndexCount(txn);
+ return idxNo < getCompletedIndexCount(opCtx);
}
-int NamespaceDetailsCollectionCatalogEntry::_findIndexNumber(OperationContext* txn,
+int NamespaceDetailsCollectionCatalogEntry::_findIndexNumber(OperationContext* opCtx,
StringData idxName) const {
NamespaceDetails::IndexIterator i = _details->ii(true);
while (i.more()) {
const IndexDetails& id = i.next();
int idxNo = i.pos() - 1;
- const BSONObj obj = _indexRecordStore->dataFor(txn, id.info.toRecordId()).toBson();
+ const BSONObj obj = _indexRecordStore->dataFor(opCtx, id.info.toRecordId()).toBson();
if (idxName == obj.getStringField("name"))
return idxNo;
}
@@ -221,29 +221,29 @@ public:
}
} iu_unittest;
-Status NamespaceDetailsCollectionCatalogEntry::removeIndex(OperationContext* txn,
+Status NamespaceDetailsCollectionCatalogEntry::removeIndex(OperationContext* opCtx,
StringData indexName) {
- int idxNo = _findIndexNumber(txn, indexName);
+ int idxNo = _findIndexNumber(opCtx, indexName);
if (idxNo < 0)
return Status(ErrorCodes::NamespaceNotFound, "index not found to remove");
RecordId infoLocation = _details->idx(idxNo).info.toRecordId();
{ // sanity check
- BSONObj info = _indexRecordStore->dataFor(txn, infoLocation).toBson();
+ BSONObj info = _indexRecordStore->dataFor(opCtx, infoLocation).toBson();
invariant(info["name"].String() == indexName);
}
{ // drop the namespace
string indexNamespace = IndexDescriptor::makeIndexNamespace(ns().ns(), indexName);
- Status status = _db->dropCollection(txn, indexNamespace);
+ Status status = _db->dropCollection(opCtx, indexNamespace);
if (!status.isOK()) {
return status;
}
}
{ // all info in the .ns file
- NamespaceDetails* d = _details->writingWithExtra(txn);
+ NamespaceDetails* d = _details->writingWithExtra(opCtx);
// fix the _multiKeyIndexBits, by moving all bits above me down one
d->multiKeyIndexBits = removeAndSlideBit(d->multiKeyIndexBits, idxNo);
@@ -253,100 +253,100 @@ Status NamespaceDetailsCollectionCatalogEntry::removeIndex(OperationContext* txn
else
d->nIndexes--;
- for (int i = idxNo; i < getTotalIndexCount(txn); i++)
+ for (int i = idxNo; i < getTotalIndexCount(opCtx); i++)
d->idx(i) = d->idx(i + 1);
- d->idx(getTotalIndexCount(txn)) = IndexDetails();
+ d->idx(getTotalIndexCount(opCtx)) = IndexDetails();
}
// Someone may be querying the system.indexes namespace directly, so we need to invalidate
// its cursors.
MMAPV1DatabaseCatalogEntry::invalidateSystemCollectionRecord(
- txn, NamespaceString(_db->name(), "system.indexes"), infoLocation);
+ opCtx, NamespaceString(_db->name(), "system.indexes"), infoLocation);
// remove from system.indexes
- _indexRecordStore->deleteRecord(txn, infoLocation);
+ _indexRecordStore->deleteRecord(opCtx, infoLocation);
return Status::OK();
}
-Status NamespaceDetailsCollectionCatalogEntry::prepareForIndexBuild(OperationContext* txn,
+Status NamespaceDetailsCollectionCatalogEntry::prepareForIndexBuild(OperationContext* opCtx,
const IndexDescriptor* desc) {
BSONObj spec = desc->infoObj();
// 1) entry in system.indexs
StatusWith<RecordId> systemIndexesEntry =
- _indexRecordStore->insertRecord(txn, spec.objdata(), spec.objsize(), false);
+ _indexRecordStore->insertRecord(opCtx, spec.objdata(), spec.objsize(), false);
if (!systemIndexesEntry.isOK())
return systemIndexesEntry.getStatus();
// 2) NamespaceDetails mods
IndexDetails* id;
try {
- id = &_details->idx(getTotalIndexCount(txn), true);
+ id = &_details->idx(getTotalIndexCount(opCtx), true);
} catch (DBException&) {
- _details->allocExtra(txn, ns().ns(), _db->_namespaceIndex, getTotalIndexCount(txn));
- id = &_details->idx(getTotalIndexCount(txn), false);
+ _details->allocExtra(opCtx, ns().ns(), _db->_namespaceIndex, getTotalIndexCount(opCtx));
+ id = &_details->idx(getTotalIndexCount(opCtx), false);
}
const DiskLoc infoLoc = DiskLoc::fromRecordId(systemIndexesEntry.getValue());
- *txn->recoveryUnit()->writing(&id->info) = infoLoc;
- *txn->recoveryUnit()->writing(&id->head) = DiskLoc();
+ *opCtx->recoveryUnit()->writing(&id->info) = infoLoc;
+ *opCtx->recoveryUnit()->writing(&id->head) = DiskLoc();
- txn->recoveryUnit()->writingInt(_details->indexBuildsInProgress) += 1;
+ opCtx->recoveryUnit()->writingInt(_details->indexBuildsInProgress) += 1;
// 3) indexes entry in .ns file and system.namespaces
- _db->createNamespaceForIndex(txn, desc->indexNamespace());
+ _db->createNamespaceForIndex(opCtx, desc->indexNamespace());
// TODO SERVER-22727: Create an entry for path-level multikey info when creating the new index.
// Mark the collation feature as in use if the index has a non-simple collation.
if (spec["collation"]) {
- _db->markCollationFeatureAsInUse(txn);
+ _db->markCollationFeatureAsInUse(opCtx);
}
return Status::OK();
}
-void NamespaceDetailsCollectionCatalogEntry::indexBuildSuccess(OperationContext* txn,
+void NamespaceDetailsCollectionCatalogEntry::indexBuildSuccess(OperationContext* opCtx,
StringData indexName) {
- int idxNo = _findIndexNumber(txn, indexName);
+ int idxNo = _findIndexNumber(opCtx, indexName);
fassert(17202, idxNo >= 0);
// Make sure the newly created index is relocated to nIndexes, if it isn't already there
- if (idxNo != getCompletedIndexCount(txn)) {
- int toIdxNo = getCompletedIndexCount(txn);
+ if (idxNo != getCompletedIndexCount(opCtx)) {
+ int toIdxNo = getCompletedIndexCount(opCtx);
- //_details->swapIndex( txn, idxNo, toIdxNo );
+ //_details->swapIndex( opCtx, idxNo, toIdxNo );
// flip main meta data
IndexDetails temp = _details->idx(idxNo);
- *txn->recoveryUnit()->writing(&_details->idx(idxNo)) = _details->idx(toIdxNo);
- *txn->recoveryUnit()->writing(&_details->idx(toIdxNo)) = temp;
+ *opCtx->recoveryUnit()->writing(&_details->idx(idxNo)) = _details->idx(toIdxNo);
+ *opCtx->recoveryUnit()->writing(&_details->idx(toIdxNo)) = temp;
// flip multi key bits
bool tempMultikey = isIndexMultikey(idxNo);
- setIndexIsMultikey(txn, idxNo, isIndexMultikey(toIdxNo));
- setIndexIsMultikey(txn, toIdxNo, tempMultikey);
+ setIndexIsMultikey(opCtx, idxNo, isIndexMultikey(toIdxNo));
+ setIndexIsMultikey(opCtx, toIdxNo, tempMultikey);
idxNo = toIdxNo;
- invariant((idxNo = _findIndexNumber(txn, indexName)));
+ invariant((idxNo = _findIndexNumber(opCtx, indexName)));
}
- txn->recoveryUnit()->writingInt(_details->indexBuildsInProgress) -= 1;
- txn->recoveryUnit()->writingInt(_details->nIndexes) += 1;
+ opCtx->recoveryUnit()->writingInt(_details->indexBuildsInProgress) -= 1;
+ opCtx->recoveryUnit()->writingInt(_details->nIndexes) += 1;
- invariant(isIndexReady(txn, indexName));
+ invariant(isIndexReady(opCtx, indexName));
}
-void NamespaceDetailsCollectionCatalogEntry::updateTTLSetting(OperationContext* txn,
+void NamespaceDetailsCollectionCatalogEntry::updateTTLSetting(OperationContext* opCtx,
StringData idxName,
long long newExpireSeconds) {
- int idx = _findIndexNumber(txn, idxName);
+ int idx = _findIndexNumber(opCtx, idxName);
invariant(idx >= 0);
IndexDetails& indexDetails = _details->idx(idx);
- BSONObj obj = _indexRecordStore->dataFor(txn, indexDetails.info.toRecordId()).toBson();
+ BSONObj obj = _indexRecordStore->dataFor(opCtx, indexDetails.info.toRecordId()).toBson();
const BSONElement oldExpireSecs = obj.getField("expireAfterSeconds");
// Important that we set the new value in-place. We are writing directly to the
@@ -358,14 +358,14 @@ void NamespaceDetailsCollectionCatalogEntry::updateTTLSetting(OperationContext*
massert(16631, "index does not have an 'expireAfterSeconds' field", false);
break;
case NumberInt:
- *txn->recoveryUnit()->writing(reinterpret_cast<int*>(nonConstPtr)) = newExpireSeconds;
+ *opCtx->recoveryUnit()->writing(reinterpret_cast<int*>(nonConstPtr)) = newExpireSeconds;
break;
case NumberDouble:
- *txn->recoveryUnit()->writing(reinterpret_cast<double*>(nonConstPtr)) =
+ *opCtx->recoveryUnit()->writing(reinterpret_cast<double*>(nonConstPtr)) =
newExpireSeconds;
break;
case NumberLong:
- *txn->recoveryUnit()->writing(reinterpret_cast<long long*>(nonConstPtr)) =
+ *opCtx->recoveryUnit()->writing(reinterpret_cast<long long*>(nonConstPtr)) =
newExpireSeconds;
break;
default:
@@ -373,65 +373,66 @@ void NamespaceDetailsCollectionCatalogEntry::updateTTLSetting(OperationContext*
}
}
-void NamespaceDetailsCollectionCatalogEntry::_updateSystemNamespaces(OperationContext* txn,
+void NamespaceDetailsCollectionCatalogEntry::_updateSystemNamespaces(OperationContext* opCtx,
const BSONObj& update) {
if (!_namespacesRecordStore)
return;
- RecordData entry = _namespacesRecordStore->dataFor(txn, _namespacesRecordId);
+ RecordData entry = _namespacesRecordStore->dataFor(opCtx, _namespacesRecordId);
const BSONObj newEntry = applyUpdateOperators(entry.releaseToBson(), update);
Status result = _namespacesRecordStore->updateRecord(
- txn, _namespacesRecordId, newEntry.objdata(), newEntry.objsize(), false, NULL);
+ opCtx, _namespacesRecordId, newEntry.objdata(), newEntry.objsize(), false, NULL);
if (ErrorCodes::NeedsDocumentMove == result) {
StatusWith<RecordId> newLocation = _namespacesRecordStore->insertRecord(
- txn, newEntry.objdata(), newEntry.objsize(), false);
+ opCtx, newEntry.objdata(), newEntry.objsize(), false);
fassert(40074, newLocation.getStatus().isOK());
// Invalidate old namespace record
MMAPV1DatabaseCatalogEntry::invalidateSystemCollectionRecord(
- txn, NamespaceString(_db->name(), "system.namespaces"), _namespacesRecordId);
+ opCtx, NamespaceString(_db->name(), "system.namespaces"), _namespacesRecordId);
- _namespacesRecordStore->deleteRecord(txn, _namespacesRecordId);
+ _namespacesRecordStore->deleteRecord(opCtx, _namespacesRecordId);
- setNamespacesRecordId(txn, newLocation.getValue());
+ setNamespacesRecordId(opCtx, newLocation.getValue());
} else {
fassert(17486, result.isOK());
}
}
-void NamespaceDetailsCollectionCatalogEntry::updateFlags(OperationContext* txn, int newValue) {
+void NamespaceDetailsCollectionCatalogEntry::updateFlags(OperationContext* opCtx, int newValue) {
NamespaceDetailsRSV1MetaData md(ns().ns(), _details);
- md.replaceUserFlags(txn, newValue);
- _updateSystemNamespaces(txn, BSON("$set" << BSON("options.flags" << newValue)));
+ md.replaceUserFlags(opCtx, newValue);
+ _updateSystemNamespaces(opCtx, BSON("$set" << BSON("options.flags" << newValue)));
}
-void NamespaceDetailsCollectionCatalogEntry::updateValidator(OperationContext* txn,
+void NamespaceDetailsCollectionCatalogEntry::updateValidator(OperationContext* opCtx,
const BSONObj& validator,
StringData validationLevel,
StringData validationAction) {
_updateSystemNamespaces(
- txn,
+ opCtx,
BSON("$set" << BSON("options.validator" << validator << "options.validationLevel"
<< validationLevel
<< "options.validationAction"
<< validationAction)));
}
-void NamespaceDetailsCollectionCatalogEntry::setNamespacesRecordId(OperationContext* txn,
+void NamespaceDetailsCollectionCatalogEntry::setNamespacesRecordId(OperationContext* opCtx,
RecordId newId) {
if (newId.isNull()) {
invariant(ns().coll() == "system.namespaces" || ns().coll() == "system.indexes");
} else {
- // 'txn' is allowed to be null, but we don't need an OperationContext in MMAP, so that's OK.
- auto namespaceEntry = _namespacesRecordStore->dataFor(txn, newId).releaseToBson();
+ // 'opCtx' is allowed to be null, but we don't need an OperationContext in MMAP, so that's
+ // OK.
+ auto namespaceEntry = _namespacesRecordStore->dataFor(opCtx, newId).releaseToBson();
invariant(namespaceEntry["name"].String() == ns().ns());
// Register RecordId change for rollback if we're not initializing.
- if (txn && !_namespacesRecordId.isNull()) {
+ if (opCtx && !_namespacesRecordId.isNull()) {
auto oldNamespacesRecordId = _namespacesRecordId;
- txn->recoveryUnit()->onRollback([=] { _namespacesRecordId = oldNamespacesRecordId; });
+ opCtx->recoveryUnit()->onRollback([=] { _namespacesRecordId = oldNamespacesRecordId; });
}
_namespacesRecordId = newId;
}
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h
index d1fc4e948c1..06b370bfa1f 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h
@@ -55,63 +55,63 @@ public:
~NamespaceDetailsCollectionCatalogEntry() {}
- CollectionOptions getCollectionOptions(OperationContext* txn) const final;
+ CollectionOptions getCollectionOptions(OperationContext* opCtx) const final;
- int getTotalIndexCount(OperationContext* txn) const final;
+ int getTotalIndexCount(OperationContext* opCtx) const final;
- int getCompletedIndexCount(OperationContext* txn) const final;
+ int getCompletedIndexCount(OperationContext* opCtx) const final;
int getMaxAllowedIndexes() const final;
- void getAllIndexes(OperationContext* txn, std::vector<std::string>* names) const final;
+ void getAllIndexes(OperationContext* opCtx, std::vector<std::string>* names) const final;
- BSONObj getIndexSpec(OperationContext* txn, StringData idxName) const final;
+ BSONObj getIndexSpec(OperationContext* opCtx, StringData idxName) const final;
- bool isIndexMultikey(OperationContext* txn,
+ bool isIndexMultikey(OperationContext* opCtx,
StringData indexName,
MultikeyPaths* multikeyPaths) const final;
bool isIndexMultikey(int idxNo) const;
- bool setIndexIsMultikey(OperationContext* txn, int idxNo, bool multikey = true);
- bool setIndexIsMultikey(OperationContext* txn,
+ bool setIndexIsMultikey(OperationContext* opCtx, int idxNo, bool multikey = true);
+ bool setIndexIsMultikey(OperationContext* opCtx,
StringData indexName,
const MultikeyPaths& multikeyPaths) final;
- RecordId getIndexHead(OperationContext* txn, StringData indexName) const final;
+ RecordId getIndexHead(OperationContext* opCtx, StringData indexName) const final;
- void setIndexHead(OperationContext* txn, StringData indexName, const RecordId& newHead) final;
+ void setIndexHead(OperationContext* opCtx, StringData indexName, const RecordId& newHead) final;
- bool isIndexReady(OperationContext* txn, StringData indexName) const final;
+ bool isIndexReady(OperationContext* opCtx, StringData indexName) const final;
- Status removeIndex(OperationContext* txn, StringData indexName) final;
+ Status removeIndex(OperationContext* opCtx, StringData indexName) final;
- Status prepareForIndexBuild(OperationContext* txn, const IndexDescriptor* spec) final;
+ Status prepareForIndexBuild(OperationContext* opCtx, const IndexDescriptor* spec) final;
- void indexBuildSuccess(OperationContext* txn, StringData indexName) final;
+ void indexBuildSuccess(OperationContext* opCtx, StringData indexName) final;
- void updateTTLSetting(OperationContext* txn,
+ void updateTTLSetting(OperationContext* opCtx,
StringData idxName,
long long newExpireSeconds) final;
- void updateFlags(OperationContext* txn, int newValue) final;
+ void updateFlags(OperationContext* opCtx, int newValue) final;
- void updateValidator(OperationContext* txn,
+ void updateValidator(OperationContext* opCtx,
const BSONObj& validator,
StringData validationLevel,
StringData validationAction) final;
// not part of interface, but available to my storage engine
- int _findIndexNumber(OperationContext* txn, StringData indexName) const;
+ int _findIndexNumber(OperationContext* opCtx, StringData indexName) const;
RecordId getNamespacesRecordId() {
return _namespacesRecordId;
}
/**
- * 'txn' is only allowed to be null when called from the constructor.
+ * 'opCtx' is only allowed to be null when called from the constructor.
*/
- void setNamespacesRecordId(OperationContext* txn, RecordId newId);
+ void setNamespacesRecordId(OperationContext* opCtx, RecordId newId);
private:
NamespaceDetails* _details;
@@ -127,7 +127,7 @@ private:
* Updates the entry for this namespace in '_namespacesRecordStore', updating
* '_namespacesRecordId' if necessary.
*/
- void _updateSystemNamespaces(OperationContext* txn, const BSONObj& update);
+ void _updateSystemNamespaces(OperationContext* opCtx, const BSONObj& update);
friend class MMAPV1DatabaseCatalogEntry;
};
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp
index eaa3a1cf958..7d5f1805d68 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp
@@ -48,16 +48,17 @@ const DiskLoc& NamespaceDetailsRSV1MetaData::capExtent() const {
return _details->capExtent;
}
-void NamespaceDetailsRSV1MetaData::setCapExtent(OperationContext* txn, const DiskLoc& loc) {
- *txn->recoveryUnit()->writing(&_details->capExtent) = loc;
+void NamespaceDetailsRSV1MetaData::setCapExtent(OperationContext* opCtx, const DiskLoc& loc) {
+ *opCtx->recoveryUnit()->writing(&_details->capExtent) = loc;
}
const DiskLoc& NamespaceDetailsRSV1MetaData::capFirstNewRecord() const {
return _details->capFirstNewRecord;
}
-void NamespaceDetailsRSV1MetaData::setCapFirstNewRecord(OperationContext* txn, const DiskLoc& loc) {
- *txn->recoveryUnit()->writing(&_details->capFirstNewRecord) = loc;
+void NamespaceDetailsRSV1MetaData::setCapFirstNewRecord(OperationContext* opCtx,
+ const DiskLoc& loc) {
+ *opCtx->recoveryUnit()->writing(&_details->capFirstNewRecord) = loc;
}
bool NamespaceDetailsRSV1MetaData::capLooped() const {
@@ -71,19 +72,19 @@ long long NamespaceDetailsRSV1MetaData::numRecords() const {
return _details->stats.nrecords;
}
-void NamespaceDetailsRSV1MetaData::incrementStats(OperationContext* txn,
+void NamespaceDetailsRSV1MetaData::incrementStats(OperationContext* opCtx,
long long dataSizeIncrement,
long long numRecordsIncrement) {
// durability todo : this could be a bit annoying / slow to record constantly
- NamespaceDetails::Stats* s = txn->recoveryUnit()->writing(&_details->stats);
+ NamespaceDetails::Stats* s = opCtx->recoveryUnit()->writing(&_details->stats);
s->datasize += dataSizeIncrement;
s->nrecords += numRecordsIncrement;
}
-void NamespaceDetailsRSV1MetaData::setStats(OperationContext* txn,
+void NamespaceDetailsRSV1MetaData::setStats(OperationContext* opCtx,
long long dataSize,
long long numRecords) {
- NamespaceDetails::Stats* s = txn->recoveryUnit()->writing(&_details->stats);
+ NamespaceDetails::Stats* s = opCtx->recoveryUnit()->writing(&_details->stats);
s->datasize = dataSize;
s->nrecords = numRecords;
}
@@ -103,45 +104,45 @@ DiskLoc NamespaceDetailsRSV1MetaData::deletedListEntry(int bucket) const {
return head;
}
-void NamespaceDetailsRSV1MetaData::setDeletedListEntry(OperationContext* txn,
+void NamespaceDetailsRSV1MetaData::setDeletedListEntry(OperationContext* opCtx,
int bucket,
const DiskLoc& loc) {
DiskLoc* head = (bucket < NamespaceDetails::SmallBuckets)
? &_details->deletedListSmall[bucket]
: &_details->deletedListLarge[bucket - NamespaceDetails::SmallBuckets];
- *txn->recoveryUnit()->writing(head) = loc;
+ *opCtx->recoveryUnit()->writing(head) = loc;
}
DiskLoc NamespaceDetailsRSV1MetaData::deletedListLegacyGrabBag() const {
return _details->deletedListLegacyGrabBag;
}
-void NamespaceDetailsRSV1MetaData::setDeletedListLegacyGrabBag(OperationContext* txn,
+void NamespaceDetailsRSV1MetaData::setDeletedListLegacyGrabBag(OperationContext* opCtx,
const DiskLoc& loc) {
- *txn->recoveryUnit()->writing(&_details->deletedListLegacyGrabBag) = loc;
+ *opCtx->recoveryUnit()->writing(&_details->deletedListLegacyGrabBag) = loc;
}
-void NamespaceDetailsRSV1MetaData::orphanDeletedList(OperationContext* txn) {
+void NamespaceDetailsRSV1MetaData::orphanDeletedList(OperationContext* opCtx) {
for (int i = 0; i < RecordStoreV1Base::Buckets; i++) {
- setDeletedListEntry(txn, i, DiskLoc());
+ setDeletedListEntry(opCtx, i, DiskLoc());
}
- setDeletedListLegacyGrabBag(txn, DiskLoc());
+ setDeletedListLegacyGrabBag(opCtx, DiskLoc());
}
-const DiskLoc& NamespaceDetailsRSV1MetaData::firstExtent(OperationContext* txn) const {
+const DiskLoc& NamespaceDetailsRSV1MetaData::firstExtent(OperationContext* opCtx) const {
return _details->firstExtent;
}
-void NamespaceDetailsRSV1MetaData::setFirstExtent(OperationContext* txn, const DiskLoc& loc) {
- *txn->recoveryUnit()->writing(&_details->firstExtent) = loc;
+void NamespaceDetailsRSV1MetaData::setFirstExtent(OperationContext* opCtx, const DiskLoc& loc) {
+ *opCtx->recoveryUnit()->writing(&_details->firstExtent) = loc;
}
-const DiskLoc& NamespaceDetailsRSV1MetaData::lastExtent(OperationContext* txn) const {
+const DiskLoc& NamespaceDetailsRSV1MetaData::lastExtent(OperationContext* opCtx) const {
return _details->lastExtent;
}
-void NamespaceDetailsRSV1MetaData::setLastExtent(OperationContext* txn, const DiskLoc& loc) {
- *txn->recoveryUnit()->writing(&_details->lastExtent) = loc;
+void NamespaceDetailsRSV1MetaData::setLastExtent(OperationContext* opCtx, const DiskLoc& loc) {
+ *opCtx->recoveryUnit()->writing(&_details->lastExtent) = loc;
}
bool NamespaceDetailsRSV1MetaData::isCapped() const {
@@ -156,38 +157,38 @@ int NamespaceDetailsRSV1MetaData::userFlags() const {
return _details->userFlags;
}
-bool NamespaceDetailsRSV1MetaData::setUserFlag(OperationContext* txn, int flag) {
+bool NamespaceDetailsRSV1MetaData::setUserFlag(OperationContext* opCtx, int flag) {
if ((_details->userFlags & flag) == flag)
return false;
- txn->recoveryUnit()->writingInt(_details->userFlags) |= flag;
+ opCtx->recoveryUnit()->writingInt(_details->userFlags) |= flag;
return true;
}
-bool NamespaceDetailsRSV1MetaData::clearUserFlag(OperationContext* txn, int flag) {
+bool NamespaceDetailsRSV1MetaData::clearUserFlag(OperationContext* opCtx, int flag) {
if ((_details->userFlags & flag) == 0)
return false;
- txn->recoveryUnit()->writingInt(_details->userFlags) &= ~flag;
+ opCtx->recoveryUnit()->writingInt(_details->userFlags) &= ~flag;
return true;
}
-bool NamespaceDetailsRSV1MetaData::replaceUserFlags(OperationContext* txn, int flags) {
+bool NamespaceDetailsRSV1MetaData::replaceUserFlags(OperationContext* opCtx, int flags) {
if (_details->userFlags == flags)
return false;
- txn->recoveryUnit()->writingInt(_details->userFlags) = flags;
+ opCtx->recoveryUnit()->writingInt(_details->userFlags) = flags;
return true;
}
-int NamespaceDetailsRSV1MetaData::lastExtentSize(OperationContext* txn) const {
+int NamespaceDetailsRSV1MetaData::lastExtentSize(OperationContext* opCtx) const {
return _details->lastExtentSize;
}
-void NamespaceDetailsRSV1MetaData::setLastExtentSize(OperationContext* txn, int newMax) {
+void NamespaceDetailsRSV1MetaData::setLastExtentSize(OperationContext* opCtx, int newMax) {
if (_details->lastExtentSize == newMax)
return;
- txn->recoveryUnit()->writingInt(_details->lastExtentSize) = newMax;
+ opCtx->recoveryUnit()->writingInt(_details->lastExtentSize) = newMax;
}
long long NamespaceDetailsRSV1MetaData::maxCappedDocs() const {
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h
index a6fde4807b5..26f0a16803f 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h
@@ -51,46 +51,46 @@ public:
virtual ~NamespaceDetailsRSV1MetaData() {}
virtual const DiskLoc& capExtent() const;
- virtual void setCapExtent(OperationContext* txn, const DiskLoc& loc);
+ virtual void setCapExtent(OperationContext* opCtx, const DiskLoc& loc);
virtual const DiskLoc& capFirstNewRecord() const;
- virtual void setCapFirstNewRecord(OperationContext* txn, const DiskLoc& loc);
+ virtual void setCapFirstNewRecord(OperationContext* opCtx, const DiskLoc& loc);
virtual bool capLooped() const;
virtual long long dataSize() const;
virtual long long numRecords() const;
- virtual void incrementStats(OperationContext* txn,
+ virtual void incrementStats(OperationContext* opCtx,
long long dataSizeIncrement,
long long numRecordsIncrement);
- virtual void setStats(OperationContext* txn, long long dataSize, long long numRecords);
+ virtual void setStats(OperationContext* opCtx, long long dataSize, long long numRecords);
virtual DiskLoc deletedListEntry(int bucket) const;
- virtual void setDeletedListEntry(OperationContext* txn, int bucket, const DiskLoc& loc);
+ virtual void setDeletedListEntry(OperationContext* opCtx, int bucket, const DiskLoc& loc);
virtual DiskLoc deletedListLegacyGrabBag() const;
- virtual void setDeletedListLegacyGrabBag(OperationContext* txn, const DiskLoc& loc);
+ virtual void setDeletedListLegacyGrabBag(OperationContext* opCtx, const DiskLoc& loc);
- virtual void orphanDeletedList(OperationContext* txn);
+ virtual void orphanDeletedList(OperationContext* opCtx);
- virtual const DiskLoc& firstExtent(OperationContext* txn) const;
- virtual void setFirstExtent(OperationContext* txn, const DiskLoc& loc);
+ virtual const DiskLoc& firstExtent(OperationContext* opCtx) const;
+ virtual void setFirstExtent(OperationContext* opCtx, const DiskLoc& loc);
- virtual const DiskLoc& lastExtent(OperationContext* txn) const;
- virtual void setLastExtent(OperationContext* txn, const DiskLoc& loc);
+ virtual const DiskLoc& lastExtent(OperationContext* opCtx) const;
+ virtual void setLastExtent(OperationContext* opCtx, const DiskLoc& loc);
virtual bool isCapped() const;
virtual bool isUserFlagSet(int flag) const;
virtual int userFlags() const;
- virtual bool setUserFlag(OperationContext* txn, int flag);
- virtual bool clearUserFlag(OperationContext* txn, int flag);
- virtual bool replaceUserFlags(OperationContext* txn, int flags);
+ virtual bool setUserFlag(OperationContext* opCtx, int flag);
+ virtual bool clearUserFlag(OperationContext* opCtx, int flag);
+ virtual bool replaceUserFlags(OperationContext* opCtx, int flags);
- virtual int lastExtentSize(OperationContext* txn) const;
- virtual void setLastExtentSize(OperationContext* txn, int newMax);
+ virtual int lastExtentSize(OperationContext* opCtx) const;
+ virtual void setLastExtentSize(OperationContext* opCtx, int newMax);
virtual long long maxCappedDocs() const;
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp
index 173c2afceca..fe9704e0e2d 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp
@@ -51,10 +51,10 @@ using std::endl;
using std::list;
using std::string;
-NamespaceIndex::NamespaceIndex(OperationContext* txn,
+NamespaceIndex::NamespaceIndex(OperationContext* opCtx,
const std::string& dir,
const std::string& database)
- : _dir(dir), _database(database), _f(txn, MongoFile::Options::SEQUENTIAL), _ht(nullptr) {}
+ : _dir(dir), _database(database), _f(opCtx, MongoFile::Options::SEQUENTIAL), _ht(nullptr) {}
NamespaceIndex::~NamespaceIndex() {}
@@ -67,33 +67,38 @@ NamespaceDetails* NamespaceIndex::details(const Namespace& ns) const {
return _ht->get(ns);
}
-void NamespaceIndex::add_ns(OperationContext* txn, StringData ns, const DiskLoc& loc, bool capped) {
+void NamespaceIndex::add_ns(OperationContext* opCtx,
+ StringData ns,
+ const DiskLoc& loc,
+ bool capped) {
NamespaceDetails details(loc, capped);
- add_ns(txn, ns, &details);
+ add_ns(opCtx, ns, &details);
}
-void NamespaceIndex::add_ns(OperationContext* txn, StringData ns, const NamespaceDetails* details) {
+void NamespaceIndex::add_ns(OperationContext* opCtx,
+ StringData ns,
+ const NamespaceDetails* details) {
Namespace n(ns);
- add_ns(txn, n, details);
+ add_ns(opCtx, n, details);
}
-void NamespaceIndex::add_ns(OperationContext* txn,
+void NamespaceIndex::add_ns(OperationContext* opCtx,
const Namespace& ns,
const NamespaceDetails* details) {
const NamespaceString nss(ns.toString());
- invariant(txn->lockState()->isDbLockedForMode(nss.db(), MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(nss.db(), MODE_X));
massert(17315, "no . in ns", nsIsFull(nss.toString()));
- uassert(10081, "too many namespaces/collections", _ht->put(txn, ns, *details));
+ uassert(10081, "too many namespaces/collections", _ht->put(opCtx, ns, *details));
}
-void NamespaceIndex::kill_ns(OperationContext* txn, StringData ns) {
+void NamespaceIndex::kill_ns(OperationContext* opCtx, StringData ns) {
const NamespaceString nss(ns.toString());
- invariant(txn->lockState()->isDbLockedForMode(nss.db(), MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(nss.db(), MODE_X));
const Namespace n(ns);
- _ht->kill(txn, n);
+ _ht->kill(opCtx, n);
if (ns.size() <= Namespace::MaxNsColletionLen) {
// Larger namespace names don't have room for $extras so they can't exist. The code
@@ -102,7 +107,7 @@ void NamespaceIndex::kill_ns(OperationContext* txn, StringData ns) {
for (int i = 0; i <= 1; i++) {
try {
Namespace extra(n.extraName(i));
- _ht->kill(txn, extra);
+ _ht->kill(opCtx, extra);
} catch (DBException&) {
LOG(3) << "caught exception in kill_ns" << endl;
}
@@ -147,7 +152,7 @@ void NamespaceIndex::maybeMkdir() const {
"create dir for db ");
}
-void NamespaceIndex::init(OperationContext* txn) {
+void NamespaceIndex::init(OperationContext* opCtx) {
invariant(!_ht.get());
unsigned long long len = 0;
@@ -158,7 +163,7 @@ void NamespaceIndex::init(OperationContext* txn) {
void* p = 0;
if (boost::filesystem::exists(nsPath)) {
- if (_f.open(txn, pathString)) {
+ if (_f.open(opCtx, pathString)) {
len = _f.length();
if (len % (1024 * 1024) != 0) {
@@ -217,7 +222,7 @@ void NamespaceIndex::init(OperationContext* txn) {
massert(18826, str::stream() << "failure writing file " << pathString, !file.bad());
}
- if (_f.create(txn, pathString, l)) {
+ if (_f.create(opCtx, pathString, l)) {
// The writes done in this function must not be rolled back. This will leave the
// file empty, but available for future use. That is why we go directly to the
// global dur dirty list rather than going through the OperationContext.
@@ -226,7 +231,7 @@ void NamespaceIndex::init(OperationContext* txn) {
// Commit the journal and all changes to disk so that even if exceptions occur
// during subsequent initialization, we won't have uncommited changes during file
// close.
- getDur().commitNow(txn);
+ getDur().commitNow(opCtx);
len = l;
invariant(len == mmapv1GlobalOptions.lenForNewNsFiles);
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_index.h b/src/mongo/db/storage/mmap_v1/catalog/namespace_index.h
index 51aae08ea61..5b7766b4035 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_index.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_index.h
@@ -51,30 +51,30 @@ class NamespaceIndex {
MONGO_DISALLOW_COPYING(NamespaceIndex);
public:
- NamespaceIndex(OperationContext* txn, const std::string& dir, const std::string& database);
+ NamespaceIndex(OperationContext* opCtx, const std::string& dir, const std::string& database);
~NamespaceIndex();
/**
* Must be called before destruction.
*/
- void close(OperationContext* txn) {
- LockMongoFilesExclusive lock(txn);
- _f.close(txn);
+ void close(OperationContext* opCtx) {
+ LockMongoFilesExclusive lock(opCtx);
+ _f.close(opCtx);
}
/* returns true if the file represented by this file exists on disk */
bool pathExists() const;
- void init(OperationContext* txn);
+ void init(OperationContext* opCtx);
- void add_ns(OperationContext* txn, StringData ns, const DiskLoc& loc, bool capped);
- void add_ns(OperationContext* txn, StringData ns, const NamespaceDetails* details);
- void add_ns(OperationContext* txn, const Namespace& ns, const NamespaceDetails* details);
+ void add_ns(OperationContext* opCtx, StringData ns, const DiskLoc& loc, bool capped);
+ void add_ns(OperationContext* opCtx, StringData ns, const NamespaceDetails* details);
+ void add_ns(OperationContext* opCtx, const Namespace& ns, const NamespaceDetails* details);
NamespaceDetails* details(StringData ns) const;
NamespaceDetails* details(const Namespace& ns) const;
- void kill_ns(OperationContext* txn, StringData ns);
+ void kill_ns(OperationContext* opCtx, StringData ns);
bool allocated() const {
return _ht.get() != 0;
diff --git a/src/mongo/db/storage/mmap_v1/data_file.cpp b/src/mongo/db/storage/mmap_v1/data_file.cpp
index d81aa591817..46af46c0a47 100644
--- a/src/mongo/db/storage/mmap_v1/data_file.cpp
+++ b/src/mongo/db/storage/mmap_v1/data_file.cpp
@@ -104,14 +104,14 @@ int DataFile::_defaultSize() const {
}
/** @return true if found and opened. if uninitialized (prealloc only) does not open. */
-Status DataFile::openExisting(OperationContext* txn, const char* filename) {
+Status DataFile::openExisting(OperationContext* opCtx, const char* filename) {
invariant(_mb == 0);
if (!boost::filesystem::exists(filename)) {
return Status(ErrorCodes::InvalidPath, "DataFile::openExisting - file does not exist");
}
- if (!mmf.open(txn, filename)) {
+ if (!mmf.open(opCtx, filename)) {
return Status(ErrorCodes::InternalError, "DataFile::openExisting - mmf.open failed");
}
@@ -138,7 +138,7 @@ Status DataFile::openExisting(OperationContext* txn, const char* filename) {
return Status::OK();
}
-void DataFile::open(OperationContext* txn,
+void DataFile::open(OperationContext* opCtx,
const char* filename,
int minSize,
bool preallocateOnly) {
@@ -170,7 +170,7 @@ void DataFile::open(OperationContext* txn,
{
invariant(_mb == 0);
unsigned long long sz = size;
- if (mmf.create(txn, filename, sz)) {
+ if (mmf.create(opCtx, filename, sz)) {
_mb = mmf.getView();
}
@@ -179,14 +179,14 @@ void DataFile::open(OperationContext* txn,
}
data_file_check(_mb);
- header()->init(txn, _fileNo, size, filename);
+ header()->init(opCtx, _fileNo, size, filename);
}
void DataFile::flush(bool sync) {
mmf.flush(sync);
}
-DiskLoc DataFile::allocExtentArea(OperationContext* txn, int size) {
+DiskLoc DataFile::allocExtentArea(OperationContext* opCtx, int size) {
// The header would be NULL if file open failed. However, if file open failed we should
// never be entering here.
invariant(header());
@@ -195,15 +195,18 @@ DiskLoc DataFile::allocExtentArea(OperationContext* txn, int size) {
int offset = header()->unused.getOfs();
DataFileHeader* h = header();
- *txn->recoveryUnit()->writing(&h->unused) = DiskLoc(_fileNo, offset + size);
- txn->recoveryUnit()->writingInt(h->unusedLength) = h->unusedLength - size;
+ *opCtx->recoveryUnit()->writing(&h->unused) = DiskLoc(_fileNo, offset + size);
+ opCtx->recoveryUnit()->writingInt(h->unusedLength) = h->unusedLength - size;
return DiskLoc(_fileNo, offset);
}
// -------------------------------------------------------------------------------
-void DataFileHeader::init(OperationContext* txn, int fileno, int filelength, const char* filename) {
+void DataFileHeader::init(OperationContext* opCtx,
+ int fileno,
+ int filelength,
+ const char* filename) {
if (uninitialized()) {
DEV log() << "datafileheader::init initializing " << filename << " n:" << fileno << endl;
@@ -233,17 +236,17 @@ void DataFileHeader::init(OperationContext* txn, int fileno, int filelength, con
freeListStart.Null();
freeListEnd.Null();
} else {
- checkUpgrade(txn);
+ checkUpgrade(opCtx);
}
}
-void DataFileHeader::checkUpgrade(OperationContext* txn) {
+void DataFileHeader::checkUpgrade(OperationContext* opCtx) {
if (freeListStart == DiskLoc(0, 0)) {
// we are upgrading from 2.4 to 2.6
invariant(freeListEnd == DiskLoc(0, 0)); // both start and end should be (0,0) or real
- WriteUnitOfWork wunit(txn);
- *txn->recoveryUnit()->writing(&freeListStart) = DiskLoc();
- *txn->recoveryUnit()->writing(&freeListEnd) = DiskLoc();
+ WriteUnitOfWork wunit(opCtx);
+ *opCtx->recoveryUnit()->writing(&freeListStart) = DiskLoc();
+ *opCtx->recoveryUnit()->writing(&freeListEnd) = DiskLoc();
wunit.commit();
}
}
diff --git a/src/mongo/db/storage/mmap_v1/data_file.h b/src/mongo/db/storage/mmap_v1/data_file.h
index 57b5fb223f9..60dc095791e 100644
--- a/src/mongo/db/storage/mmap_v1/data_file.h
+++ b/src/mongo/db/storage/mmap_v1/data_file.h
@@ -182,9 +182,9 @@ public:
return version.majorRaw() == 0;
}
- void init(OperationContext* txn, int fileno, int filelength, const char* filename);
+ void init(OperationContext* opCtx, int fileno, int filelength, const char* filename);
- void checkUpgrade(OperationContext* txn);
+ void checkUpgrade(OperationContext* opCtx);
bool isEmpty() const {
return uninitialized() || (unusedLength == fileLength - HeaderSize - 16);
@@ -195,13 +195,13 @@ public:
class DataFile {
public:
- DataFile(OperationContext* txn, int fn) : _fileNo(fn), mmf(txn), _mb(NULL) {}
+ DataFile(OperationContext* opCtx, int fn) : _fileNo(fn), mmf(opCtx), _mb(NULL) {}
/** @return true if found and opened. if uninitialized (prealloc only) does not open. */
- Status openExisting(OperationContext* txn, const char* filename);
+ Status openExisting(OperationContext* opCtx, const char* filename);
/** creates if DNE */
- void open(OperationContext* txn,
+ void open(OperationContext* opCtx,
const char* filename,
int requestedDataSize = 0,
bool preallocateOnly = false);
@@ -209,12 +209,12 @@ public:
/**
* Must be called before destruction.
*/
- void close(OperationContext* txn) {
- LockMongoFilesExclusive lock(txn);
- mmf.close(txn);
+ void close(OperationContext* opCtx) {
+ LockMongoFilesExclusive lock(opCtx);
+ mmf.close(opCtx);
}
- DiskLoc allocExtentArea(OperationContext* txn, int size);
+ DiskLoc allocExtentArea(OperationContext* opCtx, int size);
DataFileHeader* getHeader() {
return header();
diff --git a/src/mongo/db/storage/mmap_v1/data_file_sync.cpp b/src/mongo/db/storage/mmap_v1/data_file_sync.cpp
index e1bc51d29f3..ab7dca95ff9 100644
--- a/src/mongo/db/storage/mmap_v1/data_file_sync.cpp
+++ b/src/mongo/db/storage/mmap_v1/data_file_sync.cpp
@@ -81,12 +81,12 @@ void DataFileSync::run() {
break;
}
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
Date_t start = jsTime();
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
dur::notifyPreDataFileFlush();
- int numFiles = storageEngine->flushAllFiles(txn.get(), true);
+ int numFiles = storageEngine->flushAllFiles(opCtx.get(), true);
dur::notifyPostDataFileFlush();
time_flushing = durationCount<Milliseconds>(jsTime() - start);
@@ -100,7 +100,7 @@ void DataFileSync::run() {
}
}
-BSONObj DataFileSync::generateSection(OperationContext* txn,
+BSONObj DataFileSync::generateSection(OperationContext* opCtx,
const BSONElement& configElement) const {
if (!running()) {
return BSONObj();
diff --git a/src/mongo/db/storage/mmap_v1/data_file_sync.h b/src/mongo/db/storage/mmap_v1/data_file_sync.h
index b204fdad019..a26624f2c41 100644
--- a/src/mongo/db/storage/mmap_v1/data_file_sync.h
+++ b/src/mongo/db/storage/mmap_v1/data_file_sync.h
@@ -49,7 +49,8 @@ public:
void run();
- virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const;
+ virtual BSONObj generateSection(OperationContext* opCtx,
+ const BSONElement& configElement) const;
private:
void _flushed(int ms);
diff --git a/src/mongo/db/storage/mmap_v1/dur.cpp b/src/mongo/db/storage/mmap_v1/dur.cpp
index 1ed496c3a6d..599e397f944 100644
--- a/src/mongo/db/storage/mmap_v1/dur.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur.cpp
@@ -165,7 +165,8 @@ public:
return true;
}
- virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ virtual BSONObj generateSection(OperationContext* opCtx,
+ const BSONElement& configElement) const {
if (!getDur().isDurable()) {
return BSONObj();
}
@@ -193,18 +194,18 @@ public:
virtual bool waitUntilDurable() {
return false;
}
- virtual bool commitNow(OperationContext* txn) {
+ virtual bool commitNow(OperationContext* opCtx) {
return false;
}
virtual bool commitIfNeeded() {
return false;
}
- virtual void syncDataAndTruncateJournal(OperationContext* txn) {}
+ virtual void syncDataAndTruncateJournal(OperationContext* opCtx) {}
virtual bool isDurable() const {
return false;
}
virtual void closingFileNotification() {}
- virtual void commitAndStopDurThread(OperationContext* txn) {}
+ virtual void commitAndStopDurThread(OperationContext* opCtx) {}
};
@@ -219,14 +220,14 @@ public:
virtual void declareWriteIntents(const std::vector<std::pair<void*, unsigned>>& intents);
virtual void createdFile(const std::string& filename, unsigned long long len);
virtual bool waitUntilDurable();
- virtual bool commitNow(OperationContext* txn);
+ virtual bool commitNow(OperationContext* opCtx);
virtual bool commitIfNeeded();
- virtual void syncDataAndTruncateJournal(OperationContext* txn);
+ virtual void syncDataAndTruncateJournal(OperationContext* opCtx);
virtual bool isDurable() const {
return true;
}
virtual void closingFileNotification();
- virtual void commitAndStopDurThread(OperationContext* txn);
+ virtual void commitAndStopDurThread(OperationContext* opCtx);
void start(ClockSource* cs, int64_t serverStartMs);
@@ -318,7 +319,7 @@ void debugValidateFileMapsMatch(const DurableMappedFile* mmf) {
/**
* Main code of the remap private view function.
*/
-void remapPrivateViewImpl(OperationContext* txn, double fraction) {
+void remapPrivateViewImpl(OperationContext* opCtx, double fraction) {
LOG(4) << "journal REMAPPRIVATEVIEW" << endl;
// There is no way that the set of files can change while we are in this method, because
@@ -335,9 +336,9 @@ void remapPrivateViewImpl(OperationContext* txn, double fraction) {
// See SERVER-5680 to see why this code is necessary on Windows.
// See SERVER-8795 to see why this code is necessary on Solaris.
#if defined(_WIN32) || defined(__sun)
- LockMongoFilesExclusive lk(txn);
+ LockMongoFilesExclusive lk(opCtx);
#else
- LockMongoFilesShared lk(txn);
+ LockMongoFilesShared lk(opCtx);
#endif
std::set<MongoFile*>& files = MongoFile::getAllFiles();
@@ -381,7 +382,7 @@ void remapPrivateViewImpl(OperationContext* txn, double fraction) {
}
if (mmf->willNeedRemap()) {
- mmf->remapThePrivateView(txn);
+ mmf->remapThePrivateView(opCtx);
}
i++;
@@ -517,10 +518,10 @@ DurableInterface::~DurableInterface() {}
// DurableImpl
//
-bool DurableImpl::commitNow(OperationContext* txn) {
+bool DurableImpl::commitNow(OperationContext* opCtx) {
CommitNotifier::When when = commitNotify.now();
- AutoYieldFlushLockForMMAPV1Commit flushLockYield(txn->lockState());
+ AutoYieldFlushLockForMMAPV1Commit flushLockYield(opCtx->lockState());
// There is always just one waiting anyways
flushRequested.notify_one();
@@ -562,15 +563,15 @@ bool DurableImpl::commitIfNeeded() {
return true;
}
-void DurableImpl::syncDataAndTruncateJournal(OperationContext* txn) {
- invariant(txn->lockState()->isW());
+void DurableImpl::syncDataAndTruncateJournal(OperationContext* opCtx) {
+ invariant(opCtx->lockState()->isW());
// Once this returns, all the outstanding journal has been applied to the data files and
// so it's safe to do the flushAll/journalCleanup below.
- commitNow(txn);
+ commitNow(opCtx);
// Flush the shared view to disk.
- MongoFile::flushAll(txn, true);
+ MongoFile::flushAll(opCtx, true);
// Once the shared view has been flushed, we do not need the journal files anymore.
journalCleanup(true);
@@ -588,7 +589,7 @@ void DurableImpl::closingFileNotification() {
}
}
-void DurableImpl::commitAndStopDurThread(OperationContext* txn) {
+void DurableImpl::commitAndStopDurThread(OperationContext* opCtx) {
CommitNotifier::When when = commitNotify.now();
// There is always just one waiting anyways
@@ -600,7 +601,7 @@ void DurableImpl::commitAndStopDurThread(OperationContext* txn) {
applyToDataFilesNotify.waitFor(when);
// Flush the shared view to disk.
- MongoFile::flushAll(txn, true);
+ MongoFile::flushAll(opCtx, true);
// Once the shared view has been flushed, we do not need the journal files anymore.
journalCleanup(true);
@@ -630,14 +631,14 @@ void DurableImpl::start(ClockSource* cs, int64_t serverStartMs) {
* @param fraction Value between (0, 1] indicating what fraction of the memory to remap.
* Remapping too much or too frequently incurs copy-on-write page fault cost.
*/
-static void remapPrivateView(OperationContext* txn, double fraction) {
+static void remapPrivateView(OperationContext* opCtx, double fraction) {
// Remapping private views must occur after WRITETODATAFILES otherwise we wouldn't see any
// newly written data on reads.
invariant(!commitJob.hasWritten());
try {
Timer t;
- remapPrivateViewImpl(txn, fraction);
+ remapPrivateViewImpl(opCtx, fraction);
stats.curr()->_remapPrivateViewMicros += t.micros();
LOG(4) << "remapPrivateView end";
@@ -725,9 +726,9 @@ static void durThread(ClockSource* cs, int64_t serverStartMs) {
Timer t;
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- AutoAcquireFlushLockForMMAPV1Commit autoFlushLock(txn.lockState());
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ AutoAcquireFlushLockForMMAPV1Commit autoFlushLock(opCtx.lockState());
// We need to snapshot the commitNumber after the flush lock has been obtained,
// because at this point we know that we have a stable snapshot of the data.
@@ -828,7 +829,7 @@ static void durThread(ClockSource* cs, int64_t serverStartMs) {
// accessing it. Technically this step could be avoided on systems, which
// support atomic remap.
autoFlushLock.upgradeFlushLockToExclusive();
- remapPrivateView(txnPtr.get(), remapFraction);
+ remapPrivateView(opCtxPtr.get(), remapFraction);
autoFlushLock.release();
diff --git a/src/mongo/db/storage/mmap_v1/dur.h b/src/mongo/db/storage/mmap_v1/dur.h
index e4aec954749..b505de833f6 100644
--- a/src/mongo/db/storage/mmap_v1/dur.h
+++ b/src/mongo/db/storage/mmap_v1/dur.h
@@ -86,7 +86,7 @@ public:
@return true if --dur is on.
@return false if --dur is off. (in which case there is action)
*/
- virtual bool commitNow(OperationContext* txn) = 0;
+ virtual bool commitNow(OperationContext* opCtx) = 0;
/** Commit if enough bytes have been modified. Current threshold is 50MB
@@ -112,7 +112,7 @@ public:
*
* Must be called under the global X lock.
*/
- virtual void commitAndStopDurThread(OperationContext* txn) = 0;
+ virtual void commitAndStopDurThread(OperationContext* opCtx) = 0;
/**
* Commits pending changes, flushes all changes to main data files, then removes the
@@ -125,7 +125,7 @@ public:
* through recovery and be applied to files that have had changes made after this call
* applied.
*/
- virtual void syncDataAndTruncateJournal(OperationContext* txn) = 0;
+ virtual void syncDataAndTruncateJournal(OperationContext* opCtx) = 0;
virtual bool isDurable() const = 0;
diff --git a/src/mongo/db/storage/mmap_v1/dur_journal_writer.cpp b/src/mongo/db/storage/mmap_v1/dur_journal_writer.cpp
index 5c9fe117d52..8e7ef2a0cb5 100644
--- a/src/mongo/db/storage/mmap_v1/dur_journal_writer.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_journal_writer.cpp
@@ -56,14 +56,14 @@ namespace {
* (2) TODO should we do this using N threads? Would be quite easy see Hackenberg paper table
* 5 and 6. 2 threads might be a good balance.
*/
-void WRITETODATAFILES(OperationContext* txn,
+void WRITETODATAFILES(OperationContext* opCtx,
const JSectHeader& h,
const AlignedBuilder& uncompressed) {
Timer t;
LOG(4) << "WRITETODATAFILES BEGIN";
- RecoveryJob::get().processSection(txn, &h, uncompressed.buf(), uncompressed.len(), NULL);
+ RecoveryJob::get().processSection(opCtx, &h, uncompressed.buf(), uncompressed.len(), NULL);
const long long m = t.micros();
stats.curr()->_writeToDataFilesMicros += m;
diff --git a/src/mongo/db/storage/mmap_v1/dur_recover.cpp b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
index cdd3d4e3db2..7a2b05a379f 100644
--- a/src/mongo/db/storage/mmap_v1/dur_recover.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
@@ -268,23 +268,23 @@ RecoveryJob::~RecoveryJob() {
invariant(!"RecoveryJob is intentionally leaked with a bare call to operator new()");
}
-void RecoveryJob::close(OperationContext* txn) {
+void RecoveryJob::close(OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> lk(_mx);
- _close(txn);
+ _close(opCtx);
}
-void RecoveryJob::_close(OperationContext* txn) {
- MongoFile::flushAll(txn, true);
- LockMongoFilesExclusive lock(txn);
+void RecoveryJob::_close(OperationContext* opCtx) {
+ MongoFile::flushAll(opCtx, true);
+ LockMongoFilesExclusive lock(opCtx);
for (auto& durFile : _mmfs) {
- durFile->close(txn);
+ durFile->close(opCtx);
}
_mmfs.clear();
}
-RecoveryJob::Last::Last(OperationContext* txn) : _txn(txn), mmf(NULL), fileNo(-1) {
+RecoveryJob::Last::Last(OperationContext* opCtx) : _opCtx(opCtx), mmf(NULL), fileNo(-1) {
// Make sure the files list does not change from underneath
- LockMongoFilesShared::assertAtLeastReadLocked(txn);
+ LockMongoFilesShared::assertAtLeastReadLocked(opCtx);
}
DurableMappedFile* RecoveryJob::Last::newEntry(const dur::ParsedJournalEntry& entry,
@@ -296,7 +296,7 @@ DurableMappedFile* RecoveryJob::Last::newEntry(const dur::ParsedJournalEntry& en
string fn = fileName(entry.dbName, num);
MongoFile* file;
{
- MongoFileFinder finder(_txn); // must release lock before creating new DurableMappedFile
+ MongoFileFinder finder(_opCtx); // must release lock before creating new DurableMappedFile
file = finder.findByPath(fn);
}
@@ -308,8 +308,8 @@ DurableMappedFile* RecoveryJob::Last::newEntry(const dur::ParsedJournalEntry& en
log() << "journal error applying writes, file " << fn << " is not open" << endl;
verify(false);
}
- std::shared_ptr<DurableMappedFile> sp(new DurableMappedFile(_txn));
- verify(sp->open(_txn, fn));
+ std::shared_ptr<DurableMappedFile> sp(new DurableMappedFile(_opCtx));
+ verify(sp->open(_opCtx, fn));
rj._mmfs.push_back(sp);
mmf = sp.get();
}
@@ -363,14 +363,14 @@ void RecoveryJob::applyEntry(Last& last, const ParsedJournalEntry& entry, bool a
}
if (apply) {
if (entry.op->needFilesClosed()) {
- _close(last.txn()); // locked in processSection
+ _close(last.opCtx()); // locked in processSection
}
entry.op->replay();
}
}
}
-void RecoveryJob::applyEntries(OperationContext* txn, const vector<ParsedJournalEntry>& entries) {
+void RecoveryJob::applyEntries(OperationContext* opCtx, const vector<ParsedJournalEntry>& entries) {
const bool apply = (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalScanOnly) == 0;
const bool dump = (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalDumpJournal);
@@ -378,7 +378,7 @@ void RecoveryJob::applyEntries(OperationContext* txn, const vector<ParsedJournal
log() << "BEGIN section" << endl;
}
- Last last(txn);
+ Last last(opCtx);
for (vector<ParsedJournalEntry>::const_iterator i = entries.begin(); i != entries.end(); ++i) {
applyEntry(last, *i, apply, dump);
}
@@ -388,12 +388,12 @@ void RecoveryJob::applyEntries(OperationContext* txn, const vector<ParsedJournal
}
}
-void RecoveryJob::processSection(OperationContext* txn,
+void RecoveryJob::processSection(OperationContext* opCtx,
const JSectHeader* h,
const void* p,
unsigned len,
const JSectFooter* f) {
- LockMongoFilesShared lkFiles(txn); // for RecoveryJob::Last
+ LockMongoFilesShared lkFiles(opCtx); // for RecoveryJob::Last
stdx::lock_guard<stdx::mutex> lk(_mx);
if (_recovering) {
@@ -467,14 +467,14 @@ void RecoveryJob::processSection(OperationContext* txn,
}
// got all the entries for one group commit. apply them:
- applyEntries(txn, entries);
+ applyEntries(opCtx, entries);
}
/** apply a specific journal file, that is already mmap'd
@param p start of the memory mapped file
@return true if this is detected to be the last file (ends abruptly)
*/
-bool RecoveryJob::processFileBuffer(OperationContext* txn, const void* p, unsigned len) {
+bool RecoveryJob::processFileBuffer(OperationContext* opCtx, const void* p, unsigned len) {
try {
unsigned long long fileId;
BufReader br(p, len);
@@ -529,7 +529,8 @@ bool RecoveryJob::processFileBuffer(OperationContext* txn, const void* p, unsign
const char* hdr = (const char*)br.skip(h.sectionLenWithPadding());
const char* data = hdr + sizeof(JSectHeader);
const char* footer = data + dataLen;
- processSection(txn, (const JSectHeader*)hdr, data, dataLen, (const JSectFooter*)footer);
+ processSection(
+ opCtx, (const JSectHeader*)hdr, data, dataLen, (const JSectFooter*)footer);
// ctrl c check
uassert(ErrorCodes::Interrupted,
@@ -550,7 +551,7 @@ bool RecoveryJob::processFileBuffer(OperationContext* txn, const void* p, unsign
}
/** apply a specific journal file */
-bool RecoveryJob::processFile(OperationContext* txn, boost::filesystem::path journalfile) {
+bool RecoveryJob::processFile(OperationContext* opCtx, boost::filesystem::path journalfile) {
log() << "recover " << journalfile.string() << endl;
try {
@@ -564,20 +565,20 @@ bool RecoveryJob::processFile(OperationContext* txn, boost::filesystem::path jou
log() << "recover exception checking filesize" << endl;
}
- MemoryMappedFile f{txn, MongoFile::Options::READONLY | MongoFile::Options::SEQUENTIAL};
- ON_BLOCK_EXIT([&f, &txn] {
- LockMongoFilesExclusive lock(txn);
- f.close(txn);
+ MemoryMappedFile f{opCtx, MongoFile::Options::READONLY | MongoFile::Options::SEQUENTIAL};
+ ON_BLOCK_EXIT([&f, &opCtx] {
+ LockMongoFilesExclusive lock(opCtx);
+ f.close(opCtx);
});
- void* p = f.map(txn, journalfile.string().c_str());
+ void* p = f.map(opCtx, journalfile.string().c_str());
massert(13544, str::stream() << "recover error couldn't open " << journalfile.string(), p);
- return processFileBuffer(txn, p, (unsigned)f.length());
+ return processFileBuffer(opCtx, p, (unsigned)f.length());
}
/** @param files all the j._0 style files we need to apply for recovery */
-void RecoveryJob::go(OperationContext* txn, vector<boost::filesystem::path>& files) {
+void RecoveryJob::go(OperationContext* opCtx, vector<boost::filesystem::path>& files) {
log() << "recover begin" << endl;
- LockMongoFilesExclusive lkFiles(txn); // for RecoveryJob::Last
+ LockMongoFilesExclusive lkFiles(opCtx); // for RecoveryJob::Last
_recovering = true;
// load the last sequence number synced to the datafiles on disk before the last crash
@@ -585,11 +586,11 @@ void RecoveryJob::go(OperationContext* txn, vector<boost::filesystem::path>& fil
log() << "recover lsn: " << _lastDataSyncedFromLastRun << endl;
for (unsigned i = 0; i != files.size(); ++i) {
- bool abruptEnd = processFile(txn, files[i]);
+ bool abruptEnd = processFile(opCtx, files[i]);
if (abruptEnd && i + 1 < files.size()) {
log() << "recover error: abrupt end to file " << files[i].string()
<< ", yet it isn't the last journal file" << endl;
- close(txn);
+ close(opCtx);
uasserted(13535, "recover abrupt journal file end");
}
}
@@ -600,7 +601,7 @@ void RecoveryJob::go(OperationContext* txn, vector<boost::filesystem::path>& fil
<< "Last skipped sections had sequence number " << _lastSeqSkipped;
}
- close(txn);
+ close(opCtx);
if (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalScanOnly) {
uasserted(13545,
@@ -615,7 +616,7 @@ void RecoveryJob::go(OperationContext* txn, vector<boost::filesystem::path>& fil
_recovering = false;
}
-void _recover(OperationContext* txn) {
+void _recover(OperationContext* opCtx) {
verify(storageGlobalParams.dur);
boost::filesystem::path p = getJournalDir();
@@ -635,7 +636,7 @@ void _recover(OperationContext* txn) {
return;
}
- RecoveryJob::get().go(txn, journalFiles);
+ RecoveryJob::get().go(opCtx, journalFiles);
}
/** recover from a crash
@@ -645,11 +646,11 @@ void _recover(OperationContext* txn) {
void replayJournalFilesAtStartup() {
// we use a lock so that exitCleanly will wait for us
// to finish (or at least to notice what is up and stop)
- auto txn = cc().makeOperationContext();
- ScopedTransaction transaction(txn.get(), MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
+ auto opCtx = cc().makeOperationContext();
+ ScopedTransaction transaction(opCtx.get(), MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
- _recover(txn.get()); // throws on interruption
+ _recover(opCtx.get()); // throws on interruption
}
struct BufReaderY {
diff --git a/src/mongo/db/storage/mmap_v1/dur_recover.h b/src/mongo/db/storage/mmap_v1/dur_recover.h
index 9447044b607..79ce0b03e5d 100644
--- a/src/mongo/db/storage/mmap_v1/dur_recover.h
+++ b/src/mongo/db/storage/mmap_v1/dur_recover.h
@@ -56,17 +56,17 @@ public:
RecoveryJob();
~RecoveryJob();
- void go(OperationContext* txn, std::vector<boost::filesystem::path>& files);
+ void go(OperationContext* opCtx, std::vector<boost::filesystem::path>& files);
/** @param data data between header and footer. compressed if recovering. */
- void processSection(OperationContext* txn,
+ void processSection(OperationContext* opCtx,
const JSectHeader* h,
const void* data,
unsigned len,
const JSectFooter* f);
// locks and calls _close()
- void close(OperationContext* txn);
+ void close(OperationContext* opCtx);
static RecoveryJob& get() {
return _instance;
@@ -75,16 +75,16 @@ public:
private:
class Last {
public:
- Last(OperationContext* txn);
+ Last(OperationContext* opCtx);
DurableMappedFile* newEntry(const ParsedJournalEntry&, RecoveryJob&);
- OperationContext* txn() {
- return _txn;
+ OperationContext* opCtx() {
+ return _opCtx;
}
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
DurableMappedFile* mmf;
std::string dbName;
int fileNo;
@@ -93,10 +93,10 @@ private:
void write(Last& last, const ParsedJournalEntry& entry); // actually writes to the file
void applyEntry(Last& last, const ParsedJournalEntry& entry, bool apply, bool dump);
- void applyEntries(OperationContext* txn, const std::vector<ParsedJournalEntry>& entries);
- bool processFileBuffer(OperationContext* txn, const void*, unsigned len);
- bool processFile(OperationContext* txn, boost::filesystem::path journalfile);
- void _close(OperationContext* txn); // doesn't lock
+ void applyEntries(OperationContext* opCtx, const std::vector<ParsedJournalEntry>& entries);
+ bool processFileBuffer(OperationContext* opCtx, const void*, unsigned len);
+ bool processFile(OperationContext* opCtx, boost::filesystem::path journalfile);
+ void _close(OperationContext* opCtx); // doesn't lock
// Set of memory mapped files and a mutex to protect them
stdx::mutex _mx;
diff --git a/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp b/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
index 548cb8c9f05..fd199817f11 100644
--- a/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
+++ b/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
@@ -61,7 +61,7 @@ using std::map;
using std::pair;
using std::string;
-void DurableMappedFile::remapThePrivateView(OperationContext* txn) {
+void DurableMappedFile::remapThePrivateView(OperationContext* opCtx) {
verify(storageGlobalParams.dur);
_willNeedRemap = false;
@@ -70,7 +70,7 @@ void DurableMappedFile::remapThePrivateView(OperationContext* txn) {
// so the remove / add isn't necessary and can be removed?
void* old = _view_private;
// privateViews.remove(_view_private);
- _view_private = remapPrivateView(txn, _view_private);
+ _view_private = remapPrivateView(opCtx, _view_private);
// privateViews.add(_view_private, this);
fassert(16112, _view_private == old);
}
@@ -241,24 +241,24 @@ void DurableMappedFile::setPath(const std::string& f) {
_p = RelativePath::fromFullPath(storageGlobalParams.dbpath, prefix);
}
-bool DurableMappedFile::open(OperationContext* txn, const std::string& fname) {
+bool DurableMappedFile::open(OperationContext* opCtx, const std::string& fname) {
LOG(3) << "mmf open " << fname;
invariant(!_view_write);
setPath(fname);
- _view_write = map(txn, fname.c_str());
+ _view_write = map(opCtx, fname.c_str());
fassert(16333, _view_write);
return finishOpening();
}
-bool DurableMappedFile::create(OperationContext* txn,
+bool DurableMappedFile::create(OperationContext* opCtx,
const std::string& fname,
unsigned long long& len) {
LOG(3) << "mmf create " << fname;
invariant(!_view_write);
setPath(fname);
- _view_write = map(txn, fname.c_str(), len);
+ _view_write = map(opCtx, fname.c_str(), len);
fassert(16332, _view_write);
return finishOpening();
}
@@ -285,7 +285,7 @@ bool DurableMappedFile::finishOpening() {
return false;
}
-void DurableMappedFile::close(OperationContext* txn) {
+void DurableMappedFile::close(OperationContext* opCtx) {
try {
LOG(3) << "mmf close " << filename();
@@ -298,14 +298,14 @@ void DurableMappedFile::close(OperationContext* txn) {
privateViews.remove(_view_private, length());
- MemoryMappedFile::close(txn);
+ MemoryMappedFile::close(opCtx);
} catch (...) {
error() << "exception in DurableMappedFile::close";
}
}
-DurableMappedFile::DurableMappedFile(OperationContext* txn, OptionSet options)
- : MemoryMappedFile(txn, options), _willNeedRemap(false) {
+DurableMappedFile::DurableMappedFile(OperationContext* opCtx, OptionSet options)
+ : MemoryMappedFile(opCtx, options), _willNeedRemap(false) {
_view_write = _view_private = 0;
}
diff --git a/src/mongo/db/storage/mmap_v1/durable_mapped_file.h b/src/mongo/db/storage/mmap_v1/durable_mapped_file.h
index 3b9b41dab86..7050156fd25 100644
--- a/src/mongo/db/storage/mmap_v1/durable_mapped_file.h
+++ b/src/mongo/db/storage/mmap_v1/durable_mapped_file.h
@@ -51,16 +51,16 @@ protected:
}
public:
- explicit DurableMappedFile(OperationContext* txn, OptionSet options = NONE);
+ explicit DurableMappedFile(OperationContext* opCtx, OptionSet options = NONE);
virtual ~DurableMappedFile();
/**
* Callers must be holding a `LockMongoFilesExclusive`.
*/
- virtual void close(OperationContext* txn);
+ virtual void close(OperationContext* opCtx);
/** @return true if opened ok. */
- bool open(OperationContext* txn, const std::string& fname);
+ bool open(OperationContext* opCtx, const std::string& fname);
/** @return file length */
unsigned long long length() const {
@@ -79,7 +79,7 @@ public:
passed length.
@return true for ok
*/
- bool create(OperationContext* txn, const std::string& fname, unsigned long long& len);
+ bool create(OperationContext* opCtx, const std::string& fname, unsigned long long& len);
/* Get the "standard" view (which is the private one).
@return the private view.
@@ -123,7 +123,7 @@ public:
_willNeedRemap = true;
}
- void remapThePrivateView(OperationContext* txn);
+ void remapThePrivateView(OperationContext* opCtx);
virtual bool isDurableMappedFile() {
return true;
diff --git a/src/mongo/db/storage/mmap_v1/extent_manager.h b/src/mongo/db/storage/mmap_v1/extent_manager.h
index 1ca0ab7b9f1..6b0e18c44f3 100644
--- a/src/mongo/db/storage/mmap_v1/extent_manager.h
+++ b/src/mongo/db/storage/mmap_v1/extent_manager.h
@@ -77,18 +77,18 @@ public:
virtual ~ExtentManager() {}
- virtual void close(OperationContext* txn) = 0;
+ virtual void close(OperationContext* opCtx) = 0;
/**
* opens all current files
*/
- virtual Status init(OperationContext* txn) = 0;
+ virtual Status init(OperationContext* opCtx) = 0;
virtual int numFiles() const = 0;
virtual long long fileSize() const = 0;
// must call Extent::reuse on the returned extent
- virtual DiskLoc allocateExtent(OperationContext* txn,
+ virtual DiskLoc allocateExtent(OperationContext* opCtx,
bool capped,
int size,
bool enforceQuota) = 0;
@@ -96,13 +96,13 @@ public:
/**
* firstExt has to be == lastExt or a chain
*/
- virtual void freeExtents(OperationContext* txn, DiskLoc firstExt, DiskLoc lastExt) = 0;
+ virtual void freeExtents(OperationContext* opCtx, DiskLoc firstExt, DiskLoc lastExt) = 0;
/**
* frees a single extent
* ignores all fields in the Extent except: magic, myLoc, length
*/
- virtual void freeExtent(OperationContext* txn, DiskLoc extent) = 0;
+ virtual void freeExtent(OperationContext* opCtx, DiskLoc extent) = 0;
/**
* Retrieve statistics on the the free list managed by this ExtentManger.
@@ -110,7 +110,7 @@ public:
* @param totalFreeSizeBytes - non-null pointer to an int64_t receiving the total free
* space in the free list.
*/
- virtual void freeListStats(OperationContext* txn,
+ virtual void freeListStats(OperationContext* opCtx,
int* numExtents,
int64_t* totalFreeSizeBytes) const = 0;
@@ -188,8 +188,8 @@ public:
*/
virtual CacheHint* cacheHint(const DiskLoc& extentLoc, const HintType& hint) = 0;
- virtual DataFileVersion getFileFormat(OperationContext* txn) const = 0;
- virtual void setFileFormat(OperationContext* txn, DataFileVersion newVersion) = 0;
+ virtual DataFileVersion getFileFormat(OperationContext* opCtx) const = 0;
+ virtual void setFileFormat(OperationContext* opCtx, DataFileVersion newVersion) = 0;
virtual const DataFile* getOpenFile(int n) const = 0;
};
diff --git a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp
index eaf0981c688..bf761f2f2f0 100644
--- a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp
+++ b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp
@@ -42,7 +42,7 @@
namespace mongo {
-RecordData HeapRecordStoreBtree::dataFor(OperationContext* txn, const RecordId& loc) const {
+RecordData HeapRecordStoreBtree::dataFor(OperationContext* opCtx, const RecordId& loc) const {
Records::const_iterator it = _records.find(loc);
invariant(it != _records.end());
const MmapV1RecordHeader& rec = it->second;
@@ -50,7 +50,7 @@ RecordData HeapRecordStoreBtree::dataFor(OperationContext* txn, const RecordId&
return RecordData(rec.data.get(), rec.dataSize);
}
-bool HeapRecordStoreBtree::findRecord(OperationContext* txn,
+bool HeapRecordStoreBtree::findRecord(OperationContext* opCtx,
const RecordId& loc,
RecordData* out) const {
Records::const_iterator it = _records.find(loc);
@@ -61,11 +61,11 @@ bool HeapRecordStoreBtree::findRecord(OperationContext* txn,
return true;
}
-void HeapRecordStoreBtree::deleteRecord(OperationContext* txn, const RecordId& loc) {
+void HeapRecordStoreBtree::deleteRecord(OperationContext* opCtx, const RecordId& loc) {
invariant(_records.erase(loc) == 1);
}
-StatusWith<RecordId> HeapRecordStoreBtree::insertRecord(OperationContext* txn,
+StatusWith<RecordId> HeapRecordStoreBtree::insertRecord(OperationContext* opCtx,
const char* data,
int len,
bool enforceQuota) {
@@ -75,12 +75,12 @@ StatusWith<RecordId> HeapRecordStoreBtree::insertRecord(OperationContext* txn,
const RecordId loc = allocateLoc();
_records[loc] = rec;
- HeapRecordStoreBtreeRecoveryUnit::notifyInsert(txn, this, loc);
+ HeapRecordStoreBtreeRecoveryUnit::notifyInsert(opCtx, this, loc);
return StatusWith<RecordId>(loc);
}
-Status HeapRecordStoreBtree::insertRecordsWithDocWriter(OperationContext* txn,
+Status HeapRecordStoreBtree::insertRecordsWithDocWriter(OperationContext* opCtx,
const DocWriter* const* docs,
size_t nDocs,
RecordId* idsOut) {
@@ -96,7 +96,7 @@ Status HeapRecordStoreBtree::insertRecordsWithDocWriter(OperationContext* txn,
_records[loc] = rec;
*idsOut = loc;
- HeapRecordStoreBtreeRecoveryUnit::notifyInsert(txn, this, loc);
+ HeapRecordStoreBtreeRecoveryUnit::notifyInsert(opCtx, this, loc);
return Status::OK();
}
@@ -111,7 +111,7 @@ RecordId HeapRecordStoreBtree::allocateLoc() {
return dl;
}
-Status HeapRecordStoreBtree::touch(OperationContext* txn, BSONObjBuilder* output) const {
+Status HeapRecordStoreBtree::touch(OperationContext* opCtx, BSONObjBuilder* output) const {
// not currently called from the tests, but called from btree_logic.h
return Status::OK();
}
diff --git a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h
index e01b85db55c..07583680edf 100644
--- a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h
+++ b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h
@@ -49,33 +49,33 @@ public:
// RecordId(0,0) isn't valid for records.
explicit HeapRecordStoreBtree(StringData ns) : RecordStore(ns), _nextId(1) {}
- virtual RecordData dataFor(OperationContext* txn, const RecordId& loc) const;
+ virtual RecordData dataFor(OperationContext* opCtx, const RecordId& loc) const;
- virtual bool findRecord(OperationContext* txn, const RecordId& loc, RecordData* out) const;
+ virtual bool findRecord(OperationContext* opCtx, const RecordId& loc, RecordData* out) const;
- virtual void deleteRecord(OperationContext* txn, const RecordId& dl);
+ virtual void deleteRecord(OperationContext* opCtx, const RecordId& dl);
- virtual StatusWith<RecordId> insertRecord(OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord(OperationContext* opCtx,
const char* data,
int len,
bool enforceQuota);
- virtual Status insertRecordsWithDocWriter(OperationContext* txn,
+ virtual Status insertRecordsWithDocWriter(OperationContext* opCtx,
const DocWriter* const* docs,
size_t nDocs,
RecordId* idsOut);
- virtual long long numRecords(OperationContext* txn) const {
+ virtual long long numRecords(OperationContext* opCtx) const {
return _records.size();
}
- virtual Status touch(OperationContext* txn, BSONObjBuilder* output) const;
+ virtual Status touch(OperationContext* opCtx, BSONObjBuilder* output) const;
// public methods below here are not necessary to test btree, and will crash when called.
// ------------------------------
- virtual Status updateRecord(OperationContext* txn,
+ virtual Status updateRecord(OperationContext* opCtx,
const RecordId& oldLocation,
const char* data,
int len,
@@ -88,7 +88,7 @@ public:
return true;
}
- virtual StatusWith<RecordData> updateWithDamages(OperationContext* txn,
+ virtual StatusWith<RecordData> updateWithDamages(OperationContext* opCtx,
const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
@@ -96,17 +96,17 @@ public:
invariant(false);
}
- std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* txn,
+ std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* opCtx,
bool forward) const final {
invariant(false);
}
- virtual Status truncate(OperationContext* txn) {
+ virtual Status truncate(OperationContext* opCtx) {
invariant(false);
}
- virtual void cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive) {
+ virtual void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) {
invariant(false);
}
@@ -114,7 +114,7 @@ public:
invariant(false);
}
- virtual Status validate(OperationContext* txn,
+ virtual Status validate(OperationContext* opCtx,
ValidateCmdLevel level,
ValidateAdaptor* adaptor,
ValidateResults* results,
@@ -122,23 +122,23 @@ public:
invariant(false);
}
- virtual void appendCustomStats(OperationContext* txn,
+ virtual void appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* result,
double scale) const {
invariant(false);
}
- virtual void increaseStorageSize(OperationContext* txn, int size, bool enforceQuota) {
+ virtual void increaseStorageSize(OperationContext* opCtx, int size, bool enforceQuota) {
invariant(false);
}
- virtual int64_t storageSize(OperationContext* txn,
+ virtual int64_t storageSize(OperationContext* opCtx,
BSONObjBuilder* extraInfo = NULL,
int infoLevel = 0) const {
invariant(false);
}
- virtual long long dataSize(OperationContext* txn) const {
+ virtual long long dataSize(OperationContext* opCtx) const {
invariant(false);
}
@@ -154,11 +154,11 @@ public:
invariant(false);
}
- void waitForAllEarlierOplogWritesToBeVisible(OperationContext* txn) const override {
+ void waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx) const override {
invariant(false);
}
- virtual void updateStatsAfterRepair(OperationContext* txn,
+ virtual void updateStatsAfterRepair(OperationContext* opCtx,
long long numRecords,
long long dataSize) {
invariant(false);
diff --git a/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp b/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
index 279abae4a38..e4f5452e123 100644
--- a/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
+++ b/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
@@ -84,7 +84,7 @@ public:
virtual void addRequiredPrivileges(const std::string& dbname,
const BSONObj& cmdObj,
std::vector<Privilege>* out) {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
diff --git a/src/mongo/db/storage/mmap_v1/mmap.cpp b/src/mongo/db/storage/mmap_v1/mmap.cpp
index bdce2bd6468..f8d12295ce3 100644
--- a/src/mongo/db/storage/mmap_v1/mmap.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap.cpp
@@ -69,18 +69,18 @@ map<string, MongoFile*> pathToFile;
mongo::AtomicUInt64 mmfNextId(0);
} // namespace
-MemoryMappedFile::MemoryMappedFile(OperationContext* txn, OptionSet options)
+MemoryMappedFile::MemoryMappedFile(OperationContext* opCtx, OptionSet options)
: MongoFile(options), _uniqueId(mmfNextId.fetchAndAdd(1)) {
- created(txn);
+ created(opCtx);
}
MemoryMappedFile::~MemoryMappedFile() {
invariant(isClosed());
- auto txn = cc().getOperationContext();
- invariant(txn);
+ auto opCtx = cc().getOperationContext();
+ invariant(opCtx);
- LockMongoFilesShared lock(txn);
+ LockMongoFilesShared lock(opCtx);
for (std::set<MongoFile*>::const_iterator it = mmfiles.begin(); it != mmfiles.end(); it++) {
invariant(*it != this);
}
@@ -88,14 +88,14 @@ MemoryMappedFile::~MemoryMappedFile() {
/*static*/ AtomicUInt64 MemoryMappedFile::totalMappedLength;
-void* MemoryMappedFile::create(OperationContext* txn,
+void* MemoryMappedFile::create(OperationContext* opCtx,
const std::string& filename,
unsigned long long len,
bool zero) {
uassert(13468,
string("can't create file already exists ") + filename,
!boost::filesystem::exists(filename));
- void* p = map(txn, filename.c_str(), len);
+ void* p = map(opCtx, filename.c_str(), len);
fassert(16331, p);
if (zero) {
size_t sz = (size_t)len;
@@ -113,7 +113,7 @@ void* MemoryMappedFile::create(OperationContext* txn,
length = l;
}
-void* MemoryMappedFile::map(OperationContext* txn, const char* filename) {
+void* MemoryMappedFile::map(OperationContext* opCtx, const char* filename) {
unsigned long long l;
try {
l = boost::filesystem::file_size(filename);
@@ -125,7 +125,7 @@ void* MemoryMappedFile::map(OperationContext* txn, const char* filename) {
<< e.what());
}
- void* ret = map(txn, filename, l);
+ void* ret = map(opCtx, filename, l);
fassert(16334, ret);
return ret;
}
@@ -150,14 +150,14 @@ set<MongoFile*>& MongoFile::getAllFiles() {
safe to call more than once, albeit might be wasted work
ideal to call close to the close, if the close is well before object destruction
*/
-void MongoFile::destroyed(OperationContext* txn) {
- LockMongoFilesShared::assertExclusivelyLocked(txn);
+void MongoFile::destroyed(OperationContext* opCtx) {
+ LockMongoFilesShared::assertExclusivelyLocked(opCtx);
mmfiles.erase(this);
pathToFile.erase(filename());
}
/*static*/
-void MongoFile::closeAllFiles(OperationContext* txn, stringstream& message) {
+void MongoFile::closeAllFiles(OperationContext* opCtx, stringstream& message) {
static int closingAllFiles = 0;
if (closingAllFiles) {
message << "warning closingAllFiles=" << closingAllFiles << endl;
@@ -165,26 +165,26 @@ void MongoFile::closeAllFiles(OperationContext* txn, stringstream& message) {
}
++closingAllFiles;
- LockMongoFilesExclusive lk(txn);
+ LockMongoFilesExclusive lk(opCtx);
ProgressMeter pm(mmfiles.size(), 2, 1, "files", "File Closing Progress");
set<MongoFile*> temp = mmfiles;
for (set<MongoFile*>::iterator i = temp.begin(); i != temp.end(); i++) {
- (*i)->close(txn); // close() now removes from mmfiles
+ (*i)->close(opCtx); // close() now removes from mmfiles
pm.hit();
}
message << "closeAllFiles() finished";
--closingAllFiles;
}
-/*static*/ int MongoFile::flushAll(OperationContext* txn, bool sync) {
- return _flushAll(txn, sync);
+/*static*/ int MongoFile::flushAll(OperationContext* opCtx, bool sync) {
+ return _flushAll(opCtx, sync);
}
-/*static*/ int MongoFile::_flushAll(OperationContext* txn, bool sync) {
+/*static*/ int MongoFile::_flushAll(OperationContext* opCtx, bool sync) {
if (!sync) {
int num = 0;
- LockMongoFilesShared lk(txn);
+ LockMongoFilesShared lk(opCtx);
for (set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++) {
num++;
MongoFile* mmf = *i;
@@ -204,7 +204,7 @@ void MongoFile::closeAllFiles(OperationContext* txn, stringstream& message) {
OwnedPointerVector<Flushable> thingsToFlushWrapper;
vector<Flushable*>& thingsToFlush = thingsToFlushWrapper.mutableVector();
{
- LockMongoFilesShared lk(txn);
+ LockMongoFilesShared lk(opCtx);
for (set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++) {
MongoFile* mmf = *i;
if (!mmf)
@@ -214,22 +214,22 @@ void MongoFile::closeAllFiles(OperationContext* txn, stringstream& message) {
}
for (size_t i = 0; i < thingsToFlush.size(); i++) {
- thingsToFlush[i]->flush(txn);
+ thingsToFlush[i]->flush(opCtx);
}
return thingsToFlush.size();
}
-void MongoFile::created(OperationContext* txn) {
+void MongoFile::created(OperationContext* opCtx) {
// If we're a READONLY mapping, we don't want to ever flush.
if (!isOptionSet(READONLY)) {
- LockMongoFilesExclusive lk(txn);
+ LockMongoFilesExclusive lk(opCtx);
mmfiles.insert(this);
}
}
-void MongoFile::setFilename(OperationContext* txn, const std::string& fn) {
- LockMongoFilesExclusive lk(txn);
+void MongoFile::setFilename(OperationContext* opCtx, const std::string& fn) {
+ LockMongoFilesExclusive lk(opCtx);
verify(_filename.empty());
_filename = boost::filesystem::absolute(fn).generic_string();
MongoFile*& ptf = pathToFile[_filename];
diff --git a/src/mongo/db/storage/mmap_v1/mmap.h b/src/mongo/db/storage/mmap_v1/mmap.h
index fc28d56e1d9..b1b32e37d7d 100644
--- a/src/mongo/db/storage/mmap_v1/mmap.h
+++ b/src/mongo/db/storage/mmap_v1/mmap.h
@@ -71,17 +71,17 @@ class LockMongoFilesShared {
Lock::SharedLock lk;
public:
- explicit LockMongoFilesShared(OperationContext* txn) : lk(txn->lockState(), mmmutex) {
+ explicit LockMongoFilesShared(OperationContext* opCtx) : lk(opCtx->lockState(), mmmutex) {
// JS worker threads may not have cc() setup, as they work on behalf of other clients
- dassert(txn == cc().getOperationContext() || !cc().getOperationContext());
+ dassert(opCtx == cc().getOperationContext() || !cc().getOperationContext());
}
- static void assertExclusivelyLocked(OperationContext* txn) {
- invariant(mmmutex.isExclusivelyLocked(txn->lockState()));
+ static void assertExclusivelyLocked(OperationContext* opCtx) {
+ invariant(mmmutex.isExclusivelyLocked(opCtx->lockState()));
}
- static void assertAtLeastReadLocked(OperationContext* txn) {
- invariant(mmmutex.isAtLeastReadLocked(txn->lockState()));
+ static void assertAtLeastReadLocked(OperationContext* opCtx) {
+ invariant(mmmutex.isAtLeastReadLocked(opCtx->lockState()));
}
/** era changes anytime memory maps come and go. thus you can use this as a cheap way to check
@@ -99,10 +99,10 @@ class LockMongoFilesExclusive {
Lock::ExclusiveLock lk;
public:
- explicit LockMongoFilesExclusive(OperationContext* txn)
- : lk(txn->lockState(), LockMongoFilesShared::mmmutex) {
+ explicit LockMongoFilesExclusive(OperationContext* opCtx)
+ : lk(opCtx->lockState(), LockMongoFilesShared::mmmutex) {
// JS worker threads may not have cc() setup, as they work on behalf of other clients
- dassert(txn == cc().getOperationContext() || !cc().getOperationContext());
+ dassert(opCtx == cc().getOperationContext() || !cc().getOperationContext());
LockMongoFilesShared::era++;
}
};
@@ -116,7 +116,7 @@ public:
class Flushable {
public:
virtual ~Flushable() {}
- virtual void flush(OperationContext* txn) = 0;
+ virtual void flush(OperationContext* opCtx) = 0;
};
enum Options {
@@ -135,7 +135,7 @@ public:
called from within a mutex that MongoFile uses. so be careful not to deadlock.
*/
template <class F>
- static void forEach(OperationContext* txn, F fun);
+ static void forEach(OperationContext* opCtx, F fun);
/**
* note: you need to be in mmmutex when using this. forEach (above) handles that for you
@@ -143,8 +143,8 @@ public:
*/
static std::set<MongoFile*>& getAllFiles();
- static int flushAll(OperationContext* txn, bool sync); // returns n flushed
- static void closeAllFiles(OperationContext* txn, std::stringstream& message);
+ static int flushAll(OperationContext* opCtx, bool sync); // returns n flushed
+ static void closeAllFiles(OperationContext* opCtx, std::stringstream& message);
virtual bool isDurableMappedFile() {
return false;
@@ -153,20 +153,20 @@ public:
std::string filename() const {
return _filename;
}
- void setFilename(OperationContext* txn, const std::string& fn);
+ void setFilename(OperationContext* opCtx, const std::string& fn);
virtual uint64_t getUniqueId() const = 0;
private:
std::string _filename;
- static int _flushAll(OperationContext* txn, bool sync); // returns n flushed
+ static int _flushAll(OperationContext* opCtx, bool sync); // returns n flushed
const OptionSet _options;
protected:
/**
* Implementations may assume this is called from within `LockMongoFilesExclusive`.
*/
- virtual void close(OperationContext* txn) = 0;
+ virtual void close(OperationContext* opCtx) = 0;
virtual void flush(bool sync) = 0;
/**
* returns a thread safe object that you can call flush on
@@ -179,7 +179,7 @@ protected:
*/
virtual bool isClosed() = 0;
- void created(OperationContext* txn); /* subclass must call after create */
+ void created(OperationContext* opCtx); /* subclass must call after create */
/**
* Implementations may assume this is called from within `LockMongoFilesExclusive`.
@@ -189,7 +189,7 @@ protected:
* safe to call more than once, albeit might be wasted work
* ideal to call close to the close, if the close is well before object destruction
*/
- void destroyed(OperationContext* txn);
+ void destroyed(OperationContext* opCtx);
virtual unsigned long long length() const = 0;
@@ -208,7 +208,7 @@ class MongoFileFinder {
MONGO_DISALLOW_COPYING(MongoFileFinder);
public:
- MongoFileFinder(OperationContext* txn) : _lk(txn) {}
+ MongoFileFinder(OperationContext* opCtx) : _lk(opCtx) {}
/** @return The MongoFile object associated with the specified file name. If no file is open
with the specified name, returns null.
@@ -229,25 +229,25 @@ protected:
}
public:
- MemoryMappedFile(OperationContext* txn, OptionSet options = NONE);
+ MemoryMappedFile(OperationContext* opCtx, OptionSet options = NONE);
virtual ~MemoryMappedFile();
/**
* Callers must be holding a `LockMongoFilesExclusive`.
*/
- virtual void close(OperationContext* txn);
+ virtual void close(OperationContext* opCtx);
/**
* uasserts if file doesn't exist. fasserts on mmap error.
*/
- void* map(OperationContext* txn, const char* filename);
+ void* map(OperationContext* opCtx, const char* filename);
/**
* uasserts if file exists. fasserts on mmap error.
* @param zero fill file with zeros when true
*/
- void* create(OperationContext* txn,
+ void* create(OperationContext* opCtx,
const std::string& filename,
unsigned long long len,
bool zero);
@@ -307,18 +307,18 @@ protected:
* Creates with length if DNE, otherwise validates input length. Returns nullptr on mmap
* error.
*/
- void* map(OperationContext* txn, const char* filename, unsigned long long& length);
+ void* map(OperationContext* opCtx, const char* filename, unsigned long long& length);
/**
* Close the current private view and open a new replacement. Returns nullptr on mmap error.
*/
- void* remapPrivateView(OperationContext* txn, void* oldPrivateAddr);
+ void* remapPrivateView(OperationContext* opCtx, void* oldPrivateAddr);
};
/** p is called from within a mutex that MongoFile uses. so be careful not to deadlock. */
template <class F>
-inline void MongoFile::forEach(OperationContext* txn, F p) {
- LockMongoFilesShared lklk(txn);
+inline void MongoFile::forEach(OperationContext* opCtx, F p) {
+ LockMongoFilesShared lklk(opCtx);
const std::set<MongoFile*>& mmfiles = MongoFile::getAllFiles();
for (std::set<MongoFile*>::const_iterator i = mmfiles.begin(); i != mmfiles.end(); i++)
p(*i);
diff --git a/src/mongo/db/storage/mmap_v1/mmap_posix.cpp b/src/mongo/db/storage/mmap_v1/mmap_posix.cpp
index 02589421b44..2a9c1cc0458 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_posix.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_posix.cpp
@@ -79,7 +79,7 @@ static size_t fetchMinOSPageSizeBytes() {
const size_t g_minOSPageSizeBytes = fetchMinOSPageSizeBytes();
-void MemoryMappedFile::close(OperationContext* txn) {
+void MemoryMappedFile::close(OperationContext* opCtx) {
for (vector<void*>::iterator i = views.begin(); i != views.end(); i++) {
munmap(*i, len);
}
@@ -91,7 +91,7 @@ void MemoryMappedFile::close(OperationContext* txn) {
::close(fd);
fd = 0;
}
- destroyed(txn); // cleans up from the master list of mmaps
+ destroyed(opCtx); // cleans up from the master list of mmaps
}
#ifndef O_NOATIME
@@ -159,11 +159,11 @@ MAdvise::~MAdvise() {
}
#endif
-void* MemoryMappedFile::map(OperationContext* txn,
+void* MemoryMappedFile::map(OperationContext* opCtx,
const char* filename,
unsigned long long& length) {
// length may be updated by callee.
- setFilename(txn, filename);
+ setFilename(opCtx, filename);
FileAllocator::get()->allocateAsap(filename, length);
const bool readOnly = isOptionSet(READONLY);
@@ -243,9 +243,9 @@ void* MemoryMappedFile::createPrivateMap() {
return x;
}
-void* MemoryMappedFile::remapPrivateView(OperationContext* txn, void* oldPrivateAddr) {
+void* MemoryMappedFile::remapPrivateView(OperationContext* opCtx, void* oldPrivateAddr) {
#if defined(__sun) // SERVER-8795
- LockMongoFilesExclusive lockMongoFiles(txn);
+ LockMongoFilesExclusive lockMongoFiles(opCtx);
#endif
// don't unmap, just mmap over the old region
@@ -288,7 +288,7 @@ public:
PosixFlushable(MemoryMappedFile* theFile, void* view, HANDLE fd, long len)
: _theFile(theFile), _view(view), _fd(fd), _len(len), _id(_theFile->getUniqueId()) {}
- void flush(OperationContext* txn) {
+ void flush(OperationContext* opCtx) {
if (_view == NULL || _fd == 0)
return;
@@ -303,7 +303,7 @@ public:
}
// some error, lets see if we're supposed to exist
- LockMongoFilesShared mmfilesLock(txn);
+ LockMongoFilesShared mmfilesLock(opCtx);
std::set<MongoFile*> mmfs = MongoFile::getAllFiles();
std::set<MongoFile*>::const_iterator it = mmfs.find(_theFile);
if ((it == mmfs.end()) || ((*it)->getUniqueId() != _id)) {
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
index 69978fb4b53..c88ed7545c4 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
@@ -155,7 +155,7 @@ private:
Entry* const _cachedEntry;
};
-MMAPV1DatabaseCatalogEntry::MMAPV1DatabaseCatalogEntry(OperationContext* txn,
+MMAPV1DatabaseCatalogEntry::MMAPV1DatabaseCatalogEntry(OperationContext* opCtx,
StringData name,
StringData path,
bool directoryPerDB,
@@ -163,32 +163,32 @@ MMAPV1DatabaseCatalogEntry::MMAPV1DatabaseCatalogEntry(OperationContext* txn,
std::unique_ptr<ExtentManager> extentManager)
: DatabaseCatalogEntry(name),
_path(path.toString()),
- _namespaceIndex(txn, _path, name.toString()),
+ _namespaceIndex(opCtx, _path, name.toString()),
_extentManager(std::move(extentManager)) {
ScopeGuard onErrorClose = MakeGuard([&] {
- _namespaceIndex.close(txn);
- _extentManager->close(txn);
+ _namespaceIndex.close(opCtx);
+ _extentManager->close(opCtx);
});
massert(34469,
str::stream() << name << " is not a valid database name",
NamespaceString::validDBName(name));
- invariant(txn->lockState()->isDbLockedForMode(name, MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(name, MODE_X));
try {
// First init the .ns file. If this fails, we may leak the .ns file, but this is OK
// because subsequent openDB will go through this code path again.
- _namespaceIndex.init(txn);
+ _namespaceIndex.init(opCtx);
// Initialize the extent manager. This will create the first data file (.0) if needed
// and if this fails we would leak the .ns file above. Leaking the .ns or .0 file is
// acceptable, because subsequent openDB calls will exercise the code path again.
- Status s = _extentManager->init(txn);
+ Status s = _extentManager->init(opCtx);
if (!s.isOK()) {
msgasserted(16966, str::stream() << "_extentManager->init failed: " << s.toString());
}
// This is the actual loading of the on-disk structures into cache.
- _init(txn);
+ _init(opCtx);
} catch (const DBException& dbe) {
warning() << "database " << path << " " << name
<< " could not be opened due to DBException " << dbe.getCode() << ": "
@@ -230,8 +230,8 @@ void MMAPV1DatabaseCatalogEntry::_removeFromCache(RecoveryUnit* ru, StringData n
_collections.erase(i);
}
-Status MMAPV1DatabaseCatalogEntry::dropCollection(OperationContext* txn, StringData ns) {
- invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
+Status MMAPV1DatabaseCatalogEntry::dropCollection(OperationContext* opCtx, StringData ns) {
+ invariant(opCtx->lockState()->isCollectionLockedForMode(ns, MODE_X));
NamespaceDetails* details = _namespaceIndex.details(ns);
@@ -242,28 +242,28 @@ Status MMAPV1DatabaseCatalogEntry::dropCollection(OperationContext* txn, StringD
invariant(details->nIndexes == 0); // TODO: delete instead?
invariant(details->indexBuildsInProgress == 0); // TODO: delete instead?
- _removeNamespaceFromNamespaceCollection(txn, ns);
- _removeFromCache(txn->recoveryUnit(), ns);
+ _removeNamespaceFromNamespaceCollection(opCtx, ns);
+ _removeFromCache(opCtx->recoveryUnit(), ns);
// free extents
if (!details->firstExtent.isNull()) {
- _extentManager->freeExtents(txn, details->firstExtent, details->lastExtent);
- *txn->recoveryUnit()->writing(&details->firstExtent) = DiskLoc().setInvalid();
- *txn->recoveryUnit()->writing(&details->lastExtent) = DiskLoc().setInvalid();
+ _extentManager->freeExtents(opCtx, details->firstExtent, details->lastExtent);
+ *opCtx->recoveryUnit()->writing(&details->firstExtent) = DiskLoc().setInvalid();
+ *opCtx->recoveryUnit()->writing(&details->lastExtent) = DiskLoc().setInvalid();
}
// remove from the catalog hashtable
- _namespaceIndex.kill_ns(txn, ns);
+ _namespaceIndex.kill_ns(opCtx, ns);
return Status::OK();
}
-Status MMAPV1DatabaseCatalogEntry::renameCollection(OperationContext* txn,
+Status MMAPV1DatabaseCatalogEntry::renameCollection(OperationContext* opCtx,
StringData fromNS,
StringData toNS,
bool stayTemp) {
- Status s = _renameSingleNamespace(txn, fromNS, toNS, stayTemp);
+ Status s = _renameSingleNamespace(opCtx, fromNS, toNS, stayTemp);
if (!s.isOK())
return s;
@@ -271,7 +271,7 @@ Status MMAPV1DatabaseCatalogEntry::renameCollection(OperationContext* txn,
invariant(details);
RecordStoreV1Base* systemIndexRecordStore = _getIndexRecordStore();
- auto cursor = systemIndexRecordStore->getCursor(txn);
+ auto cursor = systemIndexRecordStore->getCursor(opCtx);
while (auto record = cursor->next()) {
BSONObj oldIndexSpec = record->data.releaseToBson();
if (fromNS != oldIndexSpec["ns"].valuestrsafe())
@@ -292,7 +292,7 @@ Status MMAPV1DatabaseCatalogEntry::renameCollection(OperationContext* txn,
}
StatusWith<RecordId> newIndexSpecLoc = systemIndexRecordStore->insertRecord(
- txn, newIndexSpec.objdata(), newIndexSpec.objsize(), false);
+ opCtx, newIndexSpec.objdata(), newIndexSpec.objsize(), false);
if (!newIndexSpecLoc.isOK())
return newIndexSpecLoc.getStatus();
@@ -300,10 +300,10 @@ Status MMAPV1DatabaseCatalogEntry::renameCollection(OperationContext* txn,
{
// Fix the IndexDetails pointer.
- int indexI = getCollectionCatalogEntry(toNS)->_findIndexNumber(txn, indexName);
+ int indexI = getCollectionCatalogEntry(toNS)->_findIndexNumber(opCtx, indexName);
IndexDetails& indexDetails = details->idx(indexI);
- *txn->recoveryUnit()->writing(&indexDetails.info) =
+ *opCtx->recoveryUnit()->writing(&indexDetails.info) =
DiskLoc::fromRecordId(newIndexSpecLoc.getValue());
}
@@ -312,21 +312,21 @@ Status MMAPV1DatabaseCatalogEntry::renameCollection(OperationContext* txn,
std::string oldIndexNs = IndexDescriptor::makeIndexNamespace(fromNS, indexName);
std::string newIndexNs = IndexDescriptor::makeIndexNamespace(toNS, indexName);
- Status s = _renameSingleNamespace(txn, oldIndexNs, newIndexNs, false);
+ Status s = _renameSingleNamespace(opCtx, oldIndexNs, newIndexNs, false);
if (!s.isOK())
return s;
}
// Invalidate index record for the old collection.
invalidateSystemCollectionRecord(
- txn, NamespaceString(name(), "system.indexes"), record->id);
+ opCtx, NamespaceString(name(), "system.indexes"), record->id);
- systemIndexRecordStore->deleteRecord(txn, record->id);
+ systemIndexRecordStore->deleteRecord(opCtx, record->id);
}
return Status::OK();
}
-Status MMAPV1DatabaseCatalogEntry::_renameSingleNamespace(OperationContext* txn,
+Status MMAPV1DatabaseCatalogEntry::_renameSingleNamespace(OperationContext* opCtx,
StringData fromNS,
StringData toNS,
bool stayTemp) {
@@ -345,20 +345,20 @@ Status MMAPV1DatabaseCatalogEntry::_renameSingleNamespace(OperationContext* txn,
// ----
// this could throw, but if it does we're ok
- _namespaceIndex.add_ns(txn, toNS, fromDetails);
+ _namespaceIndex.add_ns(opCtx, toNS, fromDetails);
NamespaceDetails* toDetails = _namespaceIndex.details(toNS);
try {
- toDetails->copyingFrom(txn, toNS, _namespaceIndex, fromDetails); // fixes extraOffset
+ toDetails->copyingFrom(opCtx, toNS, _namespaceIndex, fromDetails); // fixes extraOffset
} catch (DBException&) {
// could end up here if .ns is full - if so try to clean up / roll back a little
- _namespaceIndex.kill_ns(txn, toNS);
+ _namespaceIndex.kill_ns(opCtx, toNS);
throw;
}
// at this point, code .ns stuff moved
- _namespaceIndex.kill_ns(txn, fromNS);
+ _namespaceIndex.kill_ns(opCtx, fromNS);
fromDetails = NULL;
// fix system.namespaces
@@ -366,7 +366,8 @@ Status MMAPV1DatabaseCatalogEntry::_renameSingleNamespace(OperationContext* txn,
RecordId oldSpecLocation = getCollectionCatalogEntry(fromNS)->getNamespacesRecordId();
invariant(!oldSpecLocation.isNull());
{
- BSONObj oldSpec = _getNamespaceRecordStore()->dataFor(txn, oldSpecLocation).releaseToBson();
+ BSONObj oldSpec =
+ _getNamespaceRecordStore()->dataFor(opCtx, oldSpecLocation).releaseToBson();
invariant(!oldSpec.isEmpty());
BSONObjBuilder b;
@@ -383,33 +384,34 @@ Status MMAPV1DatabaseCatalogEntry::_renameSingleNamespace(OperationContext* txn,
newSpec = b.obj();
}
- RecordId rid = _addNamespaceToNamespaceCollection(txn, toNS, newSpec.isEmpty() ? 0 : &newSpec);
+ RecordId rid =
+ _addNamespaceToNamespaceCollection(opCtx, toNS, newSpec.isEmpty() ? 0 : &newSpec);
// Invalidate old namespace record
invalidateSystemCollectionRecord(
- txn, NamespaceString(name(), "system.namespaces"), oldSpecLocation);
+ opCtx, NamespaceString(name(), "system.namespaces"), oldSpecLocation);
- _getNamespaceRecordStore()->deleteRecord(txn, oldSpecLocation);
+ _getNamespaceRecordStore()->deleteRecord(opCtx, oldSpecLocation);
Entry*& entry = _collections[toNS.toString()];
invariant(entry == NULL);
- txn->recoveryUnit()->registerChange(new EntryInsertion(toNS, this));
+ opCtx->recoveryUnit()->registerChange(new EntryInsertion(toNS, this));
entry = new Entry();
- _removeFromCache(txn->recoveryUnit(), fromNS);
- _insertInCache(txn, toNS, rid, entry);
+ _removeFromCache(opCtx->recoveryUnit(), fromNS);
+ _insertInCache(opCtx, toNS, rid, entry);
return Status::OK();
}
void MMAPV1DatabaseCatalogEntry::invalidateSystemCollectionRecord(
- OperationContext* txn, NamespaceString systemCollectionNamespace, RecordId record) {
+ OperationContext* opCtx, NamespaceString systemCollectionNamespace, RecordId record) {
// Having to go back up through the DatabaseHolder is a bit of a layering
// violation, but at this point we're not going to add more MMAPv1 specific interfaces.
StringData dbName = systemCollectionNamespace.db();
- invariant(txn->lockState()->isDbLockedForMode(dbName, MODE_X));
- Database* db = dbHolder().get(txn, dbName);
+ invariant(opCtx->lockState()->isDbLockedForMode(dbName, MODE_X));
+ Database* db = dbHolder().get(opCtx, dbName);
Collection* systemCollection = db->getCollection(systemCollectionNamespace);
- systemCollection->getCursorManager()->invalidateDocument(txn, record, INVALIDATION_DELETION);
+ systemCollection->getCursorManager()->invalidateDocument(opCtx, record, INVALIDATION_DELETION);
}
void MMAPV1DatabaseCatalogEntry::appendExtraStats(OperationContext* opCtx,
@@ -491,7 +493,7 @@ void MMAPV1DatabaseCatalogEntry::getCollectionNamespaces(std::list<std::string>*
_namespaceIndex.getCollectionNamespaces(tofill);
}
-void MMAPV1DatabaseCatalogEntry::_ensureSystemCollection(OperationContext* txn, StringData ns) {
+void MMAPV1DatabaseCatalogEntry::_ensureSystemCollection(OperationContext* opCtx, StringData ns) {
NamespaceDetails* details = _namespaceIndex.details(ns);
if (details) {
return;
@@ -502,14 +504,14 @@ void MMAPV1DatabaseCatalogEntry::_ensureSystemCollection(OperationContext* txn,
fassertFailed(34372);
}
- _namespaceIndex.add_ns(txn, ns, DiskLoc(), false);
+ _namespaceIndex.add_ns(opCtx, ns, DiskLoc(), false);
}
-void MMAPV1DatabaseCatalogEntry::_init(OperationContext* txn) {
+void MMAPV1DatabaseCatalogEntry::_init(OperationContext* opCtx) {
// We wrap the WUOW in an optional as we can't create it if we are in RO mode.
boost::optional<WriteUnitOfWork> wunit;
if (!storageGlobalParams.readOnly) {
- wunit.emplace(txn);
+ wunit.emplace(opCtx);
}
// Upgrade freelist
@@ -524,13 +526,13 @@ void MMAPV1DatabaseCatalogEntry::_init(OperationContext* txn) {
if (!freeListDetails->firstExtent.isNull()) {
_extentManager->freeExtents(
- txn, freeListDetails->firstExtent, freeListDetails->lastExtent);
+ opCtx, freeListDetails->firstExtent, freeListDetails->lastExtent);
}
- _namespaceIndex.kill_ns(txn, oldFreeList.ns());
+ _namespaceIndex.kill_ns(opCtx, oldFreeList.ns());
}
- DataFileVersion version = _extentManager->getFileFormat(txn);
+ DataFileVersion version = _extentManager->getFileFormat(opCtx);
if (version.isCompatibleWithCurrentCode().isOK() && !version.mayHave30Freelist()) {
if (storageGlobalParams.readOnly) {
severe() << "Legacy storage format detected, but server was started with the "
@@ -540,7 +542,7 @@ void MMAPV1DatabaseCatalogEntry::_init(OperationContext* txn) {
// Any DB that can be opened and written to gets this flag set.
version.setMayHave30Freelist();
- _extentManager->setFileFormat(txn, version);
+ _extentManager->setFileFormat(opCtx, version);
}
const NamespaceString nsi(name(), "system.indexes");
@@ -549,16 +551,16 @@ void MMAPV1DatabaseCatalogEntry::_init(OperationContext* txn) {
bool isSystemNamespacesGoingToBeNew = _namespaceIndex.details(nsn.toString()) == NULL;
bool isSystemIndexesGoingToBeNew = _namespaceIndex.details(nsi.toString()) == NULL;
- _ensureSystemCollection(txn, nsn.toString());
- _ensureSystemCollection(txn, nsi.toString());
+ _ensureSystemCollection(opCtx, nsn.toString());
+ _ensureSystemCollection(opCtx, nsi.toString());
if (isSystemNamespacesGoingToBeNew) {
invariant(!storageGlobalParams.readOnly);
- txn->recoveryUnit()->registerChange(new EntryInsertion(nsn.toString(), this));
+ opCtx->recoveryUnit()->registerChange(new EntryInsertion(nsn.toString(), this));
}
if (isSystemIndexesGoingToBeNew) {
invariant(!storageGlobalParams.readOnly);
- txn->recoveryUnit()->registerChange(new EntryInsertion(nsi.toString(), this));
+ opCtx->recoveryUnit()->registerChange(new EntryInsertion(nsi.toString(), this));
}
Entry*& indexEntry = _collections[nsi.toString()];
@@ -578,7 +580,7 @@ void MMAPV1DatabaseCatalogEntry::_init(OperationContext* txn) {
NamespaceDetailsRSV1MetaData* md =
new NamespaceDetailsRSV1MetaData(nsn.toString(), nsDetails);
nsEntry->recordStore.reset(
- new SimpleRecordStoreV1(txn, nsn.toString(), md, _extentManager.get(), false));
+ new SimpleRecordStoreV1(opCtx, nsn.toString(), md, _extentManager.get(), false));
}
if (!indexEntry) {
@@ -588,12 +590,12 @@ void MMAPV1DatabaseCatalogEntry::_init(OperationContext* txn) {
new NamespaceDetailsRSV1MetaData(nsi.toString(), indexDetails);
indexEntry->recordStore.reset(
- new SimpleRecordStoreV1(txn, nsi.toString(), md, _extentManager.get(), true));
+ new SimpleRecordStoreV1(opCtx, nsi.toString(), md, _extentManager.get(), true));
}
RecordId indexNamespaceId;
if (isSystemIndexesGoingToBeNew) {
- indexNamespaceId = _addNamespaceToNamespaceCollection(txn, nsi.toString(), NULL);
+ indexNamespaceId = _addNamespaceToNamespaceCollection(opCtx, nsi.toString(), NULL);
}
if (!nsEntry->catalogEntry) {
@@ -625,7 +627,7 @@ void MMAPV1DatabaseCatalogEntry::_init(OperationContext* txn) {
RecordStoreV1Base* rs = _getNamespaceRecordStore();
invariant(rs);
- auto cursor = rs->getCursor(txn);
+ auto cursor = rs->getCursor(opCtx);
while (auto record = cursor->next()) {
auto ns = record->data.releaseToBson()["name"].String();
Entry*& entry = _collections[ns];
@@ -635,7 +637,7 @@ void MMAPV1DatabaseCatalogEntry::_init(OperationContext* txn) {
// because they don't have indexes on them anyway.
if (entry) {
if (entry->catalogEntry->getNamespacesRecordId().isNull()) {
- entry->catalogEntry->setNamespacesRecordId(txn, record->id);
+ entry->catalogEntry->setNamespacesRecordId(opCtx, record->id);
} else {
invariant(entry->catalogEntry->getNamespacesRecordId() == record->id);
}
@@ -643,11 +645,11 @@ void MMAPV1DatabaseCatalogEntry::_init(OperationContext* txn) {
}
entry = new Entry();
- _insertInCache(txn, ns, record->id, entry);
+ _insertInCache(opCtx, ns, record->id, entry);
}
}
-Status MMAPV1DatabaseCatalogEntry::createCollection(OperationContext* txn,
+Status MMAPV1DatabaseCatalogEntry::createCollection(OperationContext* opCtx,
StringData ns,
const CollectionOptions& options,
bool allocateDefaultSpace) {
@@ -657,36 +659,36 @@ Status MMAPV1DatabaseCatalogEntry::createCollection(OperationContext* txn,
}
BSONObj optionsAsBSON = options.toBSON();
- RecordId rid = _addNamespaceToNamespaceCollection(txn, ns, &optionsAsBSON);
+ RecordId rid = _addNamespaceToNamespaceCollection(opCtx, ns, &optionsAsBSON);
- _namespaceIndex.add_ns(txn, ns, DiskLoc(), options.capped);
+ _namespaceIndex.add_ns(opCtx, ns, DiskLoc(), options.capped);
NamespaceDetails* details = _namespaceIndex.details(ns);
// Set the flags.
- NamespaceDetailsRSV1MetaData(ns, details).replaceUserFlags(txn, options.flags);
+ NamespaceDetailsRSV1MetaData(ns, details).replaceUserFlags(opCtx, options.flags);
if (options.capped && options.cappedMaxDocs > 0) {
- txn->recoveryUnit()->writingInt(details->maxDocsInCapped) = options.cappedMaxDocs;
+ opCtx->recoveryUnit()->writingInt(details->maxDocsInCapped) = options.cappedMaxDocs;
}
Entry*& entry = _collections[ns.toString()];
invariant(!entry);
- txn->recoveryUnit()->registerChange(new EntryInsertion(ns, this));
+ opCtx->recoveryUnit()->registerChange(new EntryInsertion(ns, this));
entry = new Entry();
- _insertInCache(txn, ns, rid, entry);
+ _insertInCache(opCtx, ns, rid, entry);
if (allocateDefaultSpace) {
RecordStoreV1Base* rs = _getRecordStore(ns);
if (options.initialNumExtents > 0) {
int size = _massageExtentSize(_extentManager.get(), options.cappedSize);
for (int i = 0; i < options.initialNumExtents; i++) {
- rs->increaseStorageSize(txn, size, false);
+ rs->increaseStorageSize(opCtx, size, false);
}
} else if (!options.initialExtentSizes.empty()) {
for (size_t i = 0; i < options.initialExtentSizes.size(); i++) {
int size = options.initialExtentSizes[i];
size = _massageExtentSize(_extentManager.get(), size);
- rs->increaseStorageSize(txn, size, false);
+ rs->increaseStorageSize(opCtx, size, false);
}
} else if (options.capped) {
// normal
@@ -694,34 +696,34 @@ Status MMAPV1DatabaseCatalogEntry::createCollection(OperationContext* txn,
// Must do this at least once, otherwise we leave the collection with no
// extents, which is invalid.
int sz = _massageExtentSize(_extentManager.get(),
- options.cappedSize - rs->storageSize(txn));
+ options.cappedSize - rs->storageSize(opCtx));
sz &= 0xffffff00;
- rs->increaseStorageSize(txn, sz, false);
- } while (rs->storageSize(txn) < options.cappedSize);
+ rs->increaseStorageSize(opCtx, sz, false);
+ } while (rs->storageSize(opCtx) < options.cappedSize);
} else {
- rs->increaseStorageSize(txn, _extentManager->initialSize(128), false);
+ rs->increaseStorageSize(opCtx, _extentManager->initialSize(128), false);
}
}
if (!options.collation.isEmpty()) {
- markCollationFeatureAsInUse(txn);
+ markCollationFeatureAsInUse(opCtx);
}
return Status::OK();
}
-void MMAPV1DatabaseCatalogEntry::createNamespaceForIndex(OperationContext* txn, StringData name) {
+void MMAPV1DatabaseCatalogEntry::createNamespaceForIndex(OperationContext* opCtx, StringData name) {
// This is a simplified form of createCollection.
invariant(!_namespaceIndex.details(name));
- RecordId rid = _addNamespaceToNamespaceCollection(txn, name, NULL);
- _namespaceIndex.add_ns(txn, name, DiskLoc(), false);
+ RecordId rid = _addNamespaceToNamespaceCollection(opCtx, name, NULL);
+ _namespaceIndex.add_ns(opCtx, name, DiskLoc(), false);
Entry*& entry = _collections[name.toString()];
invariant(!entry);
- txn->recoveryUnit()->registerChange(new EntryInsertion(name, this));
+ opCtx->recoveryUnit()->registerChange(new EntryInsertion(name, this));
entry = new Entry();
- _insertInCache(txn, name, rid, entry);
+ _insertInCache(opCtx, name, rid, entry);
}
NamespaceDetailsCollectionCatalogEntry* MMAPV1DatabaseCatalogEntry::getCollectionCatalogEntry(
@@ -735,7 +737,7 @@ NamespaceDetailsCollectionCatalogEntry* MMAPV1DatabaseCatalogEntry::getCollectio
return i->second->catalogEntry.get();
}
-void MMAPV1DatabaseCatalogEntry::_insertInCache(OperationContext* txn,
+void MMAPV1DatabaseCatalogEntry::_insertInCache(OperationContext* opCtx,
StringData ns,
RecordId rid,
Entry* entry) {
@@ -750,10 +752,10 @@ void MMAPV1DatabaseCatalogEntry::_insertInCache(OperationContext* txn,
if (details->isCapped) {
entry->recordStore.reset(new CappedRecordStoreV1(
- txn, NULL, ns, md.release(), _extentManager.get(), nss.coll() == "system.indexes"));
+ opCtx, NULL, ns, md.release(), _extentManager.get(), nss.coll() == "system.indexes"));
} else {
entry->recordStore.reset(new SimpleRecordStoreV1(
- txn, ns, md.release(), _extentManager.get(), nss.coll() == "system.indexes"));
+ opCtx, ns, md.release(), _extentManager.get(), nss.coll() == "system.indexes"));
}
}
@@ -771,7 +773,7 @@ RecordStoreV1Base* MMAPV1DatabaseCatalogEntry::_getRecordStore(StringData ns) co
return i->second->recordStore.get();
}
-IndexAccessMethod* MMAPV1DatabaseCatalogEntry::getIndex(OperationContext* txn,
+IndexAccessMethod* MMAPV1DatabaseCatalogEntry::getIndex(OperationContext* opCtx,
const CollectionCatalogEntry* collection,
IndexCatalogEntry* entry) {
const std::string& type = entry->descriptor()->getAccessMethodName();
@@ -828,7 +830,7 @@ RecordStoreV1Base* MMAPV1DatabaseCatalogEntry::_getNamespaceRecordStore() const
return i->second->recordStore.get();
}
-RecordId MMAPV1DatabaseCatalogEntry::_addNamespaceToNamespaceCollection(OperationContext* txn,
+RecordId MMAPV1DatabaseCatalogEntry::_addNamespaceToNamespaceCollection(OperationContext* opCtx,
StringData ns,
const BSONObj* options) {
if (nsToCollectionSubstring(ns) == "system.namespaces") {
@@ -847,12 +849,12 @@ RecordId MMAPV1DatabaseCatalogEntry::_addNamespaceToNamespaceCollection(Operatio
RecordStoreV1Base* rs = _getNamespaceRecordStore();
invariant(rs);
- StatusWith<RecordId> loc = rs->insertRecord(txn, obj.objdata(), obj.objsize(), false);
+ StatusWith<RecordId> loc = rs->insertRecord(opCtx, obj.objdata(), obj.objsize(), false);
massertStatusOK(loc.getStatus());
return loc.getValue();
}
-void MMAPV1DatabaseCatalogEntry::_removeNamespaceFromNamespaceCollection(OperationContext* txn,
+void MMAPV1DatabaseCatalogEntry::_removeNamespaceFromNamespaceCollection(OperationContext* opCtx,
StringData ns) {
if (nsToCollectionSubstring(ns) == "system.namespaces") {
// system.namespaces holds all the others, so it is not explicitly listed in the catalog.
@@ -870,12 +872,12 @@ void MMAPV1DatabaseCatalogEntry::_removeNamespaceFromNamespaceCollection(Operati
// Invalidate old namespace record
RecordId oldSpecLocation = entry->second->catalogEntry->getNamespacesRecordId();
invalidateSystemCollectionRecord(
- txn, NamespaceString(name(), "system.namespaces"), oldSpecLocation);
+ opCtx, NamespaceString(name(), "system.namespaces"), oldSpecLocation);
- rs->deleteRecord(txn, oldSpecLocation);
+ rs->deleteRecord(opCtx, oldSpecLocation);
}
-CollectionOptions MMAPV1DatabaseCatalogEntry::getCollectionOptions(OperationContext* txn,
+CollectionOptions MMAPV1DatabaseCatalogEntry::getCollectionOptions(OperationContext* opCtx,
StringData ns) const {
if (nsToCollectionSubstring(ns) == "system.namespaces") {
return {};
@@ -886,10 +888,10 @@ CollectionOptions MMAPV1DatabaseCatalogEntry::getCollectionOptions(OperationCont
return {};
}
- return getCollectionOptions(txn, entry->second->catalogEntry->getNamespacesRecordId());
+ return getCollectionOptions(opCtx, entry->second->catalogEntry->getNamespacesRecordId());
}
-CollectionOptions MMAPV1DatabaseCatalogEntry::getCollectionOptions(OperationContext* txn,
+CollectionOptions MMAPV1DatabaseCatalogEntry::getCollectionOptions(OperationContext* opCtx,
RecordId rid) const {
CollectionOptions options;
@@ -901,7 +903,7 @@ CollectionOptions MMAPV1DatabaseCatalogEntry::getCollectionOptions(OperationCont
invariant(rs);
RecordData data;
- invariant(rs->findRecord(txn, rid, &data));
+ invariant(rs->findRecord(opCtx, rid, &data));
if (data.releaseToBson()["options"].isABSONObj()) {
Status status = options.parse(data.releaseToBson()["options"].Obj());
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h
index ea4342bb868..67e562d4fe2 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h
@@ -53,7 +53,7 @@ class OperationContext;
class MMAPV1DatabaseCatalogEntry : public DatabaseCatalogEntry {
public:
- MMAPV1DatabaseCatalogEntry(OperationContext* txn,
+ MMAPV1DatabaseCatalogEntry(OperationContext* opCtx,
StringData name,
StringData path,
bool directoryperdb,
@@ -65,9 +65,9 @@ public:
/**
* Must be called before destruction.
*/
- virtual void close(OperationContext* txn) {
- _extentManager->close(txn);
- _namespaceIndex.close(txn);
+ virtual void close(OperationContext* opCtx) {
+ _extentManager->close(opCtx);
+ _namespaceIndex.close(opCtx);
}
// these two seem the same and yet different
@@ -98,14 +98,14 @@ public:
virtual void appendExtraStats(OperationContext* opCtx, BSONObjBuilder* out, double scale) const;
- Status createCollection(OperationContext* txn,
+ Status createCollection(OperationContext* opCtx,
StringData ns,
const CollectionOptions& options,
bool allocateDefaultSpace);
- Status dropCollection(OperationContext* txn, StringData ns);
+ Status dropCollection(OperationContext* opCtx, StringData ns);
- Status renameCollection(OperationContext* txn,
+ Status renameCollection(OperationContext* opCtx,
StringData fromNS,
StringData toNS,
bool stayTemp);
@@ -119,7 +119,7 @@ public:
RecordStore* getRecordStore(StringData ns) const;
- IndexAccessMethod* getIndex(OperationContext* txn,
+ IndexAccessMethod* getIndex(OperationContext* opCtx,
const CollectionCatalogEntry* collection,
IndexCatalogEntry* index);
@@ -130,17 +130,17 @@ public:
return _extentManager.get();
}
- CollectionOptions getCollectionOptions(OperationContext* txn, StringData ns) const;
+ CollectionOptions getCollectionOptions(OperationContext* opCtx, StringData ns) const;
- CollectionOptions getCollectionOptions(OperationContext* txn, RecordId nsRid) const;
+ CollectionOptions getCollectionOptions(OperationContext* opCtx, RecordId nsRid) const;
/**
* Creates a CollectionCatalogEntry in the form of an index rather than a collection.
* MMAPv1 puts both indexes and collections into CCEs. A namespace named 'name' must not
* exist.
*/
- void createNamespaceForIndex(OperationContext* txn, StringData name);
- static void invalidateSystemCollectionRecord(OperationContext* txn,
+ void createNamespaceForIndex(OperationContext* opCtx, StringData name);
+ static void invalidateSystemCollectionRecord(OperationContext* opCtx,
NamespaceString systemCollectionNamespace,
RecordId record);
@@ -172,20 +172,20 @@ private:
RecordStoreV1Base* _getNamespaceRecordStore() const;
RecordStoreV1Base* _getRecordStore(StringData ns) const;
- RecordId _addNamespaceToNamespaceCollection(OperationContext* txn,
+ RecordId _addNamespaceToNamespaceCollection(OperationContext* opCtx,
StringData ns,
const BSONObj* options);
- void _removeNamespaceFromNamespaceCollection(OperationContext* txn, StringData ns);
+ void _removeNamespaceFromNamespaceCollection(OperationContext* opCtx, StringData ns);
- Status _renameSingleNamespace(OperationContext* txn,
+ Status _renameSingleNamespace(OperationContext* opCtx,
StringData fromNS,
StringData toNS,
bool stayTemp);
- void _ensureSystemCollection(OperationContext* txn, StringData ns);
+ void _ensureSystemCollection(OperationContext* opCtx, StringData ns);
- void _init(OperationContext* txn);
+ void _init(OperationContext* opCtx);
/**
* Populate the _collections cache.
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
index 36af8f3f06a..5a784b25dc7 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
@@ -305,24 +305,24 @@ DatabaseCatalogEntry* MMAPV1Engine::getDatabaseCatalogEntry(OperationContext* op
return entry;
}
-Status MMAPV1Engine::closeDatabase(OperationContext* txn, StringData db) {
+Status MMAPV1Engine::closeDatabase(OperationContext* opCtx, StringData db) {
// Before the files are closed, flush any potentially outstanding changes, which might
// reference this database. Otherwise we will assert when subsequent applications of the
// global journal entries occur, which happen to have write intents for the removed files.
- getDur().syncDataAndTruncateJournal(txn);
+ getDur().syncDataAndTruncateJournal(opCtx);
stdx::lock_guard<stdx::mutex> lk(_entryMapMutex);
MMAPV1DatabaseCatalogEntry* entry = _entryMap[db.toString()];
if (entry) {
- entry->close(txn);
+ entry->close(opCtx);
}
delete entry;
_entryMap.erase(db.toString());
return Status::OK();
}
-Status MMAPV1Engine::dropDatabase(OperationContext* txn, StringData db) {
- Status status = closeDatabase(txn, db);
+Status MMAPV1Engine::dropDatabase(OperationContext* opCtx, StringData db) {
+ Status status = closeDatabase(opCtx, db);
if (!status.isOK())
return status;
@@ -350,15 +350,15 @@ void MMAPV1Engine::_listDatabases(const std::string& directory, std::vector<std:
}
}
-int MMAPV1Engine::flushAllFiles(OperationContext* txn, bool sync) {
- return MongoFile::flushAll(txn, sync);
+int MMAPV1Engine::flushAllFiles(OperationContext* opCtx, bool sync) {
+ return MongoFile::flushAll(opCtx, sync);
}
-Status MMAPV1Engine::beginBackup(OperationContext* txn) {
+Status MMAPV1Engine::beginBackup(OperationContext* opCtx) {
return Status::OK();
}
-void MMAPV1Engine::endBackup(OperationContext* txn) {
+void MMAPV1Engine::endBackup(OperationContext* opCtx) {
return;
}
@@ -379,15 +379,15 @@ void MMAPV1Engine::cleanShutdown() {
// we would only hang here if the file_allocator code generates a
// synchronous signal, which we don't expect
log() << "shutdown: waiting for fs preallocator..." << endl;
- auto txn = cc().getOperationContext();
+ auto opCtx = cc().getOperationContext();
// In some cases we may shutdown early before we have any operation context yet, but we need
// one for synchronization purposes.
ServiceContext::UniqueOperationContext newTxn;
- if (!txn) {
+ if (!opCtx) {
newTxn = cc().makeOperationContext();
- txn = newTxn.get();
- invariant(txn);
+ opCtx = newTxn.get();
+ invariant(opCtx);
}
FileAllocator::get()->waitUntilFinished();
@@ -395,12 +395,12 @@ void MMAPV1Engine::cleanShutdown() {
if (storageGlobalParams.dur) {
log() << "shutdown: final commit..." << endl;
- getDur().commitAndStopDurThread(txn);
+ getDur().commitAndStopDurThread(opCtx);
}
log() << "shutdown: closing all files..." << endl;
stringstream ss3;
- MemoryMappedFile::closeAllFiles(txn, ss3);
+ MemoryMappedFile::closeAllFiles(opCtx, ss3);
log() << ss3.str() << endl;
}
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h
index b5d19950d7b..0d7c6b3711e 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h
@@ -57,9 +57,9 @@ public:
RecoveryUnit* newRecoveryUnit();
void listDatabases(std::vector<std::string>* out) const;
- int flushAllFiles(OperationContext* txn, bool sync);
- Status beginBackup(OperationContext* txn);
- void endBackup(OperationContext* txn);
+ int flushAllFiles(OperationContext* opCtx, bool sync);
+ Status beginBackup(OperationContext* opCtx);
+ void endBackup(OperationContext* opCtx);
DatabaseCatalogEntry* getDatabaseCatalogEntry(OperationContext* opCtx, StringData db);
@@ -74,19 +74,19 @@ public:
virtual bool isEphemeral() const;
- virtual Status closeDatabase(OperationContext* txn, StringData db);
+ virtual Status closeDatabase(OperationContext* opCtx, StringData db);
- virtual Status dropDatabase(OperationContext* txn, StringData db);
+ virtual Status dropDatabase(OperationContext* opCtx, StringData db);
virtual void cleanShutdown();
// Callers should use repairDatabase instead.
- virtual Status repairRecordStore(OperationContext* txn, const std::string& ns) {
+ virtual Status repairRecordStore(OperationContext* opCtx, const std::string& ns) {
return Status(ErrorCodes::InternalError, "MMAPv1 doesn't support repairRecordStore");
}
// MMAPv1 specific (non-virtual)
- Status repairDatabase(OperationContext* txn,
+ Status repairDatabase(OperationContext* opCtx,
const std::string& dbName,
bool preserveClonedFilesOnFailure,
bool backupOriginalFiles);
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
index 3f9b6019802..29d7952bf22 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
@@ -79,9 +79,9 @@ class MmapV1RecordFetcher : public RecordFetcher {
public:
explicit MmapV1RecordFetcher(const MmapV1RecordHeader* record) : _record(record) {}
- virtual void setup(OperationContext* txn) {
+ virtual void setup(OperationContext* opCtx) {
invariant(!_filesLock.get());
- _filesLock.reset(new LockMongoFilesShared(txn));
+ _filesLock.reset(new LockMongoFilesShared(opCtx));
}
virtual void fetch() {
@@ -138,7 +138,7 @@ boost::filesystem::path MmapV1ExtentManager::_fileName(int n) const {
}
-Status MmapV1ExtentManager::init(OperationContext* txn) {
+Status MmapV1ExtentManager::init(OperationContext* opCtx) {
invariant(_files.empty());
for (int n = 0; n < DiskLoc::MaxFiles; n++) {
@@ -172,18 +172,18 @@ Status MmapV1ExtentManager::init(OperationContext* txn) {
}
}
- unique_ptr<DataFile> df(new DataFile(txn, n));
+ unique_ptr<DataFile> df(new DataFile(opCtx, n));
- Status s = df->openExisting(txn, fullNameString.c_str());
+ Status s = df->openExisting(opCtx, fullNameString.c_str());
if (!s.isOK()) {
- df->close(txn);
+ df->close(opCtx);
return s;
}
invariant(!df->getHeader()->uninitialized());
// We only checkUpgrade on files that we are keeping, not preallocs.
- df->getHeader()->checkUpgrade(txn);
+ df->getHeader()->checkUpgrade(opCtx);
_files.push_back(df.release());
}
@@ -191,13 +191,13 @@ Status MmapV1ExtentManager::init(OperationContext* txn) {
// If this is a new database being created, instantiate the first file and one extent so
// we can have a coherent database.
if (_files.empty()) {
- WriteUnitOfWork wuow(txn);
- _createExtent(txn, initialSize(128), false);
+ WriteUnitOfWork wuow(opCtx);
+ _createExtent(opCtx, initialSize(128), false);
wuow.commit();
// Commit the journal and all changes to disk so that even if exceptions occur during
// subsequent initialization, we won't have uncommited changes during file close.
- getDur().commitNow(txn);
+ getDur().commitNow(opCtx);
}
return Status::OK();
@@ -221,12 +221,12 @@ DataFile* MmapV1ExtentManager::_getOpenFile(int fileId) {
return _files[fileId];
}
-DataFile* MmapV1ExtentManager::_addAFile(OperationContext* txn,
+DataFile* MmapV1ExtentManager::_addAFile(OperationContext* opCtx,
int sizeNeeded,
bool preallocateNextFile) {
// Database must be stable and we need to be in some sort of an update operation in order
// to add a new file.
- invariant(txn->lockState()->isDbLockedForMode(_dbname, MODE_IX));
+ invariant(opCtx->lockState()->isDbLockedForMode(_dbname, MODE_IX));
const int allocFileId = _files.size();
@@ -241,15 +241,15 @@ DataFile* MmapV1ExtentManager::_addAFile(OperationContext* txn,
}
{
- unique_ptr<DataFile> allocFile(new DataFile(txn, allocFileId));
+ unique_ptr<DataFile> allocFile(new DataFile(opCtx, allocFileId));
const string allocFileName = _fileName(allocFileId).string();
Timer t;
try {
- allocFile->open(txn, allocFileName.c_str(), minSize, false);
+ allocFile->open(opCtx, allocFileName.c_str(), minSize, false);
} catch (...) {
- allocFile->close(txn);
+ allocFile->close(opCtx);
throw;
}
if (t.seconds() > 1) {
@@ -263,13 +263,13 @@ DataFile* MmapV1ExtentManager::_addAFile(OperationContext* txn,
// Preallocate is asynchronous
if (preallocateNextFile) {
- unique_ptr<DataFile> nextFile(new DataFile(txn, allocFileId + 1));
+ unique_ptr<DataFile> nextFile(new DataFile(opCtx, allocFileId + 1));
const string nextFileName = _fileName(allocFileId + 1).string();
try {
- nextFile->open(txn, nextFileName.c_str(), minSize, false);
+ nextFile->open(opCtx, nextFileName.c_str(), minSize, false);
} catch (...) {
- nextFile->close(txn);
+ nextFile->close(opCtx);
throw;
}
}
@@ -366,26 +366,26 @@ int MmapV1ExtentManager::maxSize() const {
}
DiskLoc MmapV1ExtentManager::_createExtentInFile(
- OperationContext* txn, int fileNo, DataFile* f, int size, bool enforceQuota) {
+ OperationContext* opCtx, int fileNo, DataFile* f, int size, bool enforceQuota) {
_checkQuota(enforceQuota, fileNo - 1);
massert(10358, "bad new extent size", size >= minSize() && size <= maxSize());
- DiskLoc loc = f->allocExtentArea(txn, size);
+ DiskLoc loc = f->allocExtentArea(opCtx, size);
loc.assertOk();
Extent* e = getExtent(loc, false);
verify(e);
- *txn->recoveryUnit()->writing(&e->magic) = Extent::extentSignature;
- *txn->recoveryUnit()->writing(&e->myLoc) = loc;
- *txn->recoveryUnit()->writing(&e->length) = size;
+ *opCtx->recoveryUnit()->writing(&e->magic) = Extent::extentSignature;
+ *opCtx->recoveryUnit()->writing(&e->myLoc) = loc;
+ *opCtx->recoveryUnit()->writing(&e->length) = size;
return loc;
}
-DiskLoc MmapV1ExtentManager::_createExtent(OperationContext* txn, int size, bool enforceQuota) {
+DiskLoc MmapV1ExtentManager::_createExtent(OperationContext* opCtx, int size, bool enforceQuota) {
size = quantizeExtentSize(size);
if (size > maxSize())
@@ -398,7 +398,7 @@ DiskLoc MmapV1ExtentManager::_createExtent(OperationContext* txn, int size, bool
invariant(f);
if (f->getHeader()->unusedLength >= size) {
- return _createExtentInFile(txn, i, f, size, enforceQuota);
+ return _createExtentInFile(opCtx, i, f, size, enforceQuota);
}
}
@@ -407,10 +407,10 @@ DiskLoc MmapV1ExtentManager::_createExtent(OperationContext* txn, int size, bool
// no space in an existing file
// allocate files until we either get one big enough or hit maxSize
for (int i = 0; i < 8; i++) {
- DataFile* f = _addAFile(txn, size, false);
+ DataFile* f = _addAFile(opCtx, size, false);
if (f->getHeader()->unusedLength >= size) {
- return _createExtentInFile(txn, numFiles() - 1, f, size, enforceQuota);
+ return _createExtentInFile(opCtx, numFiles() - 1, f, size, enforceQuota);
}
}
@@ -418,7 +418,7 @@ DiskLoc MmapV1ExtentManager::_createExtent(OperationContext* txn, int size, bool
msgasserted(14810, "couldn't allocate space for a new extent");
}
-DiskLoc MmapV1ExtentManager::_allocFromFreeList(OperationContext* txn,
+DiskLoc MmapV1ExtentManager::_allocFromFreeList(OperationContext* opCtx,
int approxSize,
bool capped) {
// setup extent constraints
@@ -493,27 +493,27 @@ DiskLoc MmapV1ExtentManager::_allocFromFreeList(OperationContext* txn,
// remove from the free list
if (!best->xprev.isNull())
- *txn->recoveryUnit()->writing(&getExtent(best->xprev)->xnext) = best->xnext;
+ *opCtx->recoveryUnit()->writing(&getExtent(best->xprev)->xnext) = best->xnext;
if (!best->xnext.isNull())
- *txn->recoveryUnit()->writing(&getExtent(best->xnext)->xprev) = best->xprev;
+ *opCtx->recoveryUnit()->writing(&getExtent(best->xnext)->xprev) = best->xprev;
if (_getFreeListStart() == best->myLoc)
- _setFreeListStart(txn, best->xnext);
+ _setFreeListStart(opCtx, best->xnext);
if (_getFreeListEnd() == best->myLoc)
- _setFreeListEnd(txn, best->xprev);
+ _setFreeListEnd(opCtx, best->xprev);
return best->myLoc;
}
-DiskLoc MmapV1ExtentManager::allocateExtent(OperationContext* txn,
+DiskLoc MmapV1ExtentManager::allocateExtent(OperationContext* opCtx,
bool capped,
int size,
bool enforceQuota) {
- Lock::ResourceLock rlk(txn->lockState(), _rid, MODE_X);
+ Lock::ResourceLock rlk(opCtx->lockState(), _rid, MODE_X);
bool fromFreeList = true;
- DiskLoc eloc = _allocFromFreeList(txn, size, capped);
+ DiskLoc eloc = _allocFromFreeList(opCtx, size, capped);
if (eloc.isNull()) {
fromFreeList = false;
- eloc = _createExtent(txn, size, enforceQuota);
+ eloc = _createExtent(opCtx, size, enforceQuota);
}
invariant(!eloc.isNull());
@@ -525,29 +525,29 @@ DiskLoc MmapV1ExtentManager::allocateExtent(OperationContext* txn,
return eloc;
}
-void MmapV1ExtentManager::freeExtent(OperationContext* txn, DiskLoc firstExt) {
- Lock::ResourceLock rlk(txn->lockState(), _rid, MODE_X);
+void MmapV1ExtentManager::freeExtent(OperationContext* opCtx, DiskLoc firstExt) {
+ Lock::ResourceLock rlk(opCtx->lockState(), _rid, MODE_X);
Extent* e = getExtent(firstExt);
- txn->recoveryUnit()->writing(&e->xnext)->Null();
- txn->recoveryUnit()->writing(&e->xprev)->Null();
- txn->recoveryUnit()->writing(&e->firstRecord)->Null();
- txn->recoveryUnit()->writing(&e->lastRecord)->Null();
+ opCtx->recoveryUnit()->writing(&e->xnext)->Null();
+ opCtx->recoveryUnit()->writing(&e->xprev)->Null();
+ opCtx->recoveryUnit()->writing(&e->firstRecord)->Null();
+ opCtx->recoveryUnit()->writing(&e->lastRecord)->Null();
if (_getFreeListStart().isNull()) {
- _setFreeListStart(txn, firstExt);
- _setFreeListEnd(txn, firstExt);
+ _setFreeListStart(opCtx, firstExt);
+ _setFreeListEnd(opCtx, firstExt);
} else {
DiskLoc a = _getFreeListStart();
invariant(getExtent(a)->xprev.isNull());
- *txn->recoveryUnit()->writing(&getExtent(a)->xprev) = firstExt;
- *txn->recoveryUnit()->writing(&getExtent(firstExt)->xnext) = a;
- _setFreeListStart(txn, firstExt);
+ *opCtx->recoveryUnit()->writing(&getExtent(a)->xprev) = firstExt;
+ *opCtx->recoveryUnit()->writing(&getExtent(firstExt)->xnext) = a;
+ _setFreeListStart(opCtx, firstExt);
}
}
-void MmapV1ExtentManager::freeExtents(OperationContext* txn, DiskLoc firstExt, DiskLoc lastExt) {
- Lock::ResourceLock rlk(txn->lockState(), _rid, MODE_X);
+void MmapV1ExtentManager::freeExtents(OperationContext* opCtx, DiskLoc firstExt, DiskLoc lastExt) {
+ Lock::ResourceLock rlk(opCtx->lockState(), _rid, MODE_X);
if (firstExt.isNull() && lastExt.isNull())
return;
@@ -563,14 +563,14 @@ void MmapV1ExtentManager::freeExtents(OperationContext* txn, DiskLoc firstExt, D
}
if (_getFreeListStart().isNull()) {
- _setFreeListStart(txn, firstExt);
- _setFreeListEnd(txn, lastExt);
+ _setFreeListStart(opCtx, firstExt);
+ _setFreeListEnd(opCtx, lastExt);
} else {
DiskLoc a = _getFreeListStart();
invariant(getExtent(a)->xprev.isNull());
- *txn->recoveryUnit()->writing(&getExtent(a)->xprev) = lastExt;
- *txn->recoveryUnit()->writing(&getExtent(lastExt)->xnext) = a;
- _setFreeListStart(txn, firstExt);
+ *opCtx->recoveryUnit()->writing(&getExtent(a)->xprev) = lastExt;
+ *opCtx->recoveryUnit()->writing(&getExtent(lastExt)->xnext) = a;
+ _setFreeListStart(opCtx, firstExt);
}
}
@@ -588,22 +588,22 @@ DiskLoc MmapV1ExtentManager::_getFreeListEnd() const {
return file->header()->freeListEnd;
}
-void MmapV1ExtentManager::_setFreeListStart(OperationContext* txn, DiskLoc loc) {
+void MmapV1ExtentManager::_setFreeListStart(OperationContext* opCtx, DiskLoc loc) {
invariant(!_files.empty());
DataFile* file = _files[0];
- *txn->recoveryUnit()->writing(&file->header()->freeListStart) = loc;
+ *opCtx->recoveryUnit()->writing(&file->header()->freeListStart) = loc;
}
-void MmapV1ExtentManager::_setFreeListEnd(OperationContext* txn, DiskLoc loc) {
+void MmapV1ExtentManager::_setFreeListEnd(OperationContext* opCtx, DiskLoc loc) {
invariant(!_files.empty());
DataFile* file = _files[0];
- *txn->recoveryUnit()->writing(&file->header()->freeListEnd) = loc;
+ *opCtx->recoveryUnit()->writing(&file->header()->freeListEnd) = loc;
}
-void MmapV1ExtentManager::freeListStats(OperationContext* txn,
+void MmapV1ExtentManager::freeListStats(OperationContext* opCtx,
int* numExtents,
int64_t* totalFreeSizeBytes) const {
- Lock::ResourceLock rlk(txn->lockState(), _rid, MODE_S);
+ Lock::ResourceLock rlk(opCtx->lockState(), _rid, MODE_S);
invariant(numExtents);
invariant(totalFreeSizeBytes);
@@ -644,9 +644,9 @@ MmapV1ExtentManager::FilesArray::~FilesArray() {
}
}
-void MmapV1ExtentManager::FilesArray::close(OperationContext* txn) {
+void MmapV1ExtentManager::FilesArray::close(OperationContext* opCtx) {
for (int i = 0; i < size(); i++) {
- _files[i]->close(txn);
+ _files[i]->close(opCtx);
}
}
@@ -659,7 +659,7 @@ void MmapV1ExtentManager::FilesArray::push_back(DataFile* val) {
_size.store(n + 1);
}
-DataFileVersion MmapV1ExtentManager::getFileFormat(OperationContext* txn) const {
+DataFileVersion MmapV1ExtentManager::getFileFormat(OperationContext* opCtx) const {
if (numFiles() == 0)
return DataFileVersion(0, 0);
@@ -667,12 +667,12 @@ DataFileVersion MmapV1ExtentManager::getFileFormat(OperationContext* txn) const
return _getOpenFile(0)->getHeader()->version;
}
-void MmapV1ExtentManager::setFileFormat(OperationContext* txn, DataFileVersion newVersion) {
+void MmapV1ExtentManager::setFileFormat(OperationContext* opCtx, DataFileVersion newVersion) {
invariant(numFiles() > 0);
DataFile* df = _getOpenFile(0);
invariant(df);
- *txn->recoveryUnit()->writing(&df->getHeader()->version) = newVersion;
+ *opCtx->recoveryUnit()->writing(&df->getHeader()->version) = newVersion;
}
}
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h
index fb891ee8227..dff9de9efe9 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h
@@ -92,34 +92,34 @@ public:
/**
* Must be called before destruction.
*/
- void close(OperationContext* txn) {
- _files.close(txn);
+ void close(OperationContext* opCtx) {
+ _files.close(opCtx);
}
/**
* opens all current files, not thread safe
*/
- Status init(OperationContext* txn);
+ Status init(OperationContext* opCtx);
int numFiles() const;
long long fileSize() const;
// must call Extent::reuse on the returned extent
- DiskLoc allocateExtent(OperationContext* txn, bool capped, int size, bool enforceQuota);
+ DiskLoc allocateExtent(OperationContext* opCtx, bool capped, int size, bool enforceQuota);
/**
* firstExt has to be == lastExt or a chain
*/
- void freeExtents(OperationContext* txn, DiskLoc firstExt, DiskLoc lastExt);
+ void freeExtents(OperationContext* opCtx, DiskLoc firstExt, DiskLoc lastExt);
/**
* frees a single extent
* ignores all fields in the Extent except: magic, myLoc, length
*/
- void freeExtent(OperationContext* txn, DiskLoc extent);
+ void freeExtent(OperationContext* opCtx, DiskLoc extent);
- void freeListStats(OperationContext* txn, int* numExtents, int64_t* totalFreeSizeBytes) const;
+ void freeListStats(OperationContext* opCtx, int* numExtents, int64_t* totalFreeSizeBytes) const;
/**
* @param loc - has to be for a specific MmapV1RecordHeader
@@ -152,8 +152,8 @@ public:
/**
* Not thread safe, requires a database exclusive lock
*/
- DataFileVersion getFileFormat(OperationContext* txn) const final;
- void setFileFormat(OperationContext* txn, DataFileVersion newVersion) final;
+ DataFileVersion getFileFormat(OperationContext* opCtx) const final;
+ void setFileFormat(OperationContext* opCtx, DataFileVersion newVersion) final;
const DataFile* getOpenFile(int n) const final {
return _getOpenFile(n);
@@ -167,13 +167,13 @@ private:
/**
* will return NULL if nothing suitable in free list
*/
- DiskLoc _allocFromFreeList(OperationContext* txn, int approxSize, bool capped);
+ DiskLoc _allocFromFreeList(OperationContext* opCtx, int approxSize, bool capped);
/* allocate a new Extent, does not check free list
*/
- DiskLoc _createExtent(OperationContext* txn, int approxSize, bool enforceQuota);
+ DiskLoc _createExtent(OperationContext* opCtx, int approxSize, bool enforceQuota);
- DataFile* _addAFile(OperationContext* txn, int sizeNeeded, bool preallocateNextFile);
+ DataFile* _addAFile(OperationContext* opCtx, int sizeNeeded, bool preallocateNextFile);
/**
@@ -184,14 +184,14 @@ private:
DiskLoc _getFreeListStart() const;
DiskLoc _getFreeListEnd() const;
- void _setFreeListStart(OperationContext* txn, DiskLoc loc);
- void _setFreeListEnd(OperationContext* txn, DiskLoc loc);
+ void _setFreeListStart(OperationContext* opCtx, DiskLoc loc);
+ void _setFreeListEnd(OperationContext* opCtx, DiskLoc loc);
const DataFile* _getOpenFile(int fileId) const;
DataFile* _getOpenFile(int fileId);
DiskLoc _createExtentInFile(
- OperationContext* txn, int fileNo, DataFile* f, int size, bool enforceQuota);
+ OperationContext* opCtx, int fileNo, DataFile* f, int size, bool enforceQuota);
boost::filesystem::path _fileName(int n) const;
@@ -219,7 +219,7 @@ private:
/**
* Must be called before destruction.
*/
- void close(OperationContext* txn);
+ void close(OperationContext* opCtx);
/**
* Returns file at location 'n' in the array, with 'n' less than number of files added.
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp
index 29c7e0e92c7..ce670175fbd 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp
@@ -50,25 +50,25 @@ public:
MyHarnessHelper() {}
virtual std::unique_ptr<RecordStore> newNonCappedRecordStore() {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
auto md = stdx::make_unique<DummyRecordStoreV1MetaData>(false, 0);
- md->setUserFlag(&txn, CollectionOptions::Flag_NoPadding);
- return stdx::make_unique<SimpleRecordStoreV1>(&txn, "a.b", md.release(), &_em, false);
+ md->setUserFlag(&opCtx, CollectionOptions::Flag_NoPadding);
+ return stdx::make_unique<SimpleRecordStoreV1>(&opCtx, "a.b", md.release(), &_em, false);
}
std::unique_ptr<RecordStore> newCappedRecordStore(int64_t cappedMaxSize,
int64_t cappedMaxDocs) final {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
auto md = stdx::make_unique<DummyRecordStoreV1MetaData>(true, 0);
auto md_ptr = md.get();
- std::unique_ptr<RecordStore> rs =
- stdx::make_unique<CappedRecordStoreV1>(&txn, nullptr, "a.b", md.release(), &_em, false);
+ std::unique_ptr<RecordStore> rs = stdx::make_unique<CappedRecordStoreV1>(
+ &opCtx, nullptr, "a.b", md.release(), &_em, false);
LocAndSize records[] = {{}};
LocAndSize drecs[] = {{DiskLoc(0, 1000), 1000}, {}};
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid());
- initializeV1RS(&txn, records, drecs, NULL, &_em, md_ptr);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc().setInvalid());
+ initializeV1RS(&opCtx, records, drecs, NULL, &_em, md_ptr);
return rs;
}
diff --git a/src/mongo/db/storage/mmap_v1/mmap_windows.cpp b/src/mongo/db/storage/mmap_v1/mmap_windows.cpp
index d8e8d61e624..e34bee74ade 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_windows.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_windows.cpp
@@ -148,8 +148,8 @@ static void* getNextMemoryMappedFileLocation(unsigned long long mmfSize) {
return reinterpret_cast<void*>(static_cast<uintptr_t>(thisMemoryMappedFileLocation));
}
-void MemoryMappedFile::close(OperationContext* txn) {
- LockMongoFilesShared::assertExclusivelyLocked(txn);
+void MemoryMappedFile::close(OperationContext* opCtx) {
+ LockMongoFilesShared::assertExclusivelyLocked(opCtx);
// Prevent flush and close from concurrently running
stdx::lock_guard<stdx::mutex> lk(_flushMutex);
@@ -174,18 +174,18 @@ void MemoryMappedFile::close(OperationContext* txn) {
fd = 0;
}
- destroyed(txn); // cleans up from the master list of mmaps
+ destroyed(opCtx); // cleans up from the master list of mmaps
}
bool MemoryMappedFile::isClosed() {
return !len && !fd && !views.size();
}
-void* MemoryMappedFile::map(OperationContext* txn,
+void* MemoryMappedFile::map(OperationContext* opCtx,
const char* filenameIn,
unsigned long long& length) {
verify(fd == 0 && len == 0); // can't open more than once
- setFilename(txn, filenameIn);
+ setFilename(opCtx, filenameIn);
FileAllocator::get()->allocateAsap(filenameIn, length);
/* big hack here: Babble uses db names with colons. doesn't seem to work on windows. temporary
* perhaps. */
@@ -244,8 +244,8 @@ void* MemoryMappedFile::map(OperationContext* txn,
severe() << "CreateFileMappingW for " << filename << " failed with "
<< errnoWithDescription(dosError) << " (file size is " << length << ")"
<< " in MemoryMappedFile::map" << endl;
- LockMongoFilesExclusive lock(txn);
- close(txn);
+ LockMongoFilesExclusive lock(opCtx);
+ close(opCtx);
fassertFailed(16225);
}
}
@@ -296,8 +296,8 @@ void* MemoryMappedFile::map(OperationContext* txn,
<< length << ")"
<< " in MemoryMappedFile::map" << endl;
- LockMongoFilesExclusive lock(txn);
- close(txn);
+ LockMongoFilesExclusive lock(opCtx);
+ close(opCtx);
fassertFailed(16166);
}
@@ -359,8 +359,8 @@ void* MemoryMappedFile::createPrivateMap() {
return privateMapAddress;
}
-void* MemoryMappedFile::remapPrivateView(OperationContext* txn, void* oldPrivateAddr) {
- LockMongoFilesExclusive lockMongoFiles(txn);
+void* MemoryMappedFile::remapPrivateView(OperationContext* opCtx, void* oldPrivateAddr) {
+ LockMongoFilesExclusive lockMongoFiles(opCtx);
privateViews.clearWritableBits(oldPrivateAddr, len);
@@ -406,12 +406,12 @@ public:
_filename(filename),
_flushMutex(flushMutex) {}
- void flush(OperationContext* txn) {
+ void flush(OperationContext* opCtx) {
if (!_view || !_fd)
return;
{
- LockMongoFilesShared mmfilesLock(txn);
+ LockMongoFilesShared mmfilesLock(opCtx);
std::set<MongoFile*> mmfs = MongoFile::getAllFiles();
std::set<MongoFile*>::const_iterator it = mmfs.find(_theFile);
@@ -475,9 +475,9 @@ void MemoryMappedFile::flush(bool sync) {
uassert(13056, "Async flushing not supported on windows", sync);
if (!views.empty()) {
WindowsFlushable f(this, viewForFlushing(), fd, _uniqueId, filename(), _flushMutex);
- auto txn = cc().getOperationContext();
- invariant(txn);
- f.flush(txn);
+ auto opCtx = cc().getOperationContext();
+ invariant(opCtx);
+ f.flush(opCtx);
}
}
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
index d67fa341cb4..2c49cf8b5c8 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
@@ -139,7 +139,7 @@ RecordStoreV1Base::RecordStoreV1Base(StringData ns,
RecordStoreV1Base::~RecordStoreV1Base() {}
-int64_t RecordStoreV1Base::storageSize(OperationContext* txn,
+int64_t RecordStoreV1Base::storageSize(OperationContext* opCtx,
BSONObjBuilder* extraInfo,
int level) const {
BSONArrayBuilder extentInfo;
@@ -147,7 +147,7 @@ int64_t RecordStoreV1Base::storageSize(OperationContext* txn,
int64_t total = 0;
int n = 0;
- DiskLoc cur = _details->firstExtent(txn);
+ DiskLoc cur = _details->firstExtent(opCtx);
while (!cur.isNull()) {
Extent* e = _extentManager->getExtent(cur);
@@ -170,11 +170,11 @@ int64_t RecordStoreV1Base::storageSize(OperationContext* txn,
return total;
}
-RecordData RecordStoreV1Base::dataFor(OperationContext* txn, const RecordId& loc) const {
+RecordData RecordStoreV1Base::dataFor(OperationContext* opCtx, const RecordId& loc) const {
return recordFor(DiskLoc::fromRecordId(loc))->toRecordData();
}
-bool RecordStoreV1Base::findRecord(OperationContext* txn,
+bool RecordStoreV1Base::findRecord(OperationContext* opCtx,
const RecordId& loc,
RecordData* rd) const {
// this is a bit odd, as the semantics of using the storage engine imply it _has_ to be.
@@ -202,28 +202,29 @@ DeletedRecord* RecordStoreV1Base::drec(const DiskLoc& loc) const {
return reinterpret_cast<DeletedRecord*>(recordFor(loc));
}
-Extent* RecordStoreV1Base::_getExtent(OperationContext* txn, const DiskLoc& loc) const {
+Extent* RecordStoreV1Base::_getExtent(OperationContext* opCtx, const DiskLoc& loc) const {
return _extentManager->getExtent(loc);
}
-DiskLoc RecordStoreV1Base::_getExtentLocForRecord(OperationContext* txn, const DiskLoc& loc) const {
+DiskLoc RecordStoreV1Base::_getExtentLocForRecord(OperationContext* opCtx,
+ const DiskLoc& loc) const {
return _extentManager->extentLocForV1(loc);
}
-DiskLoc RecordStoreV1Base::getNextRecord(OperationContext* txn, const DiskLoc& loc) const {
- DiskLoc next = getNextRecordInExtent(txn, loc);
+DiskLoc RecordStoreV1Base::getNextRecord(OperationContext* opCtx, const DiskLoc& loc) const {
+ DiskLoc next = getNextRecordInExtent(opCtx, loc);
if (!next.isNull()) {
return next;
}
// now traverse extents
- Extent* e = _getExtent(txn, _getExtentLocForRecord(txn, loc));
+ Extent* e = _getExtent(opCtx, _getExtentLocForRecord(opCtx, loc));
while (1) {
if (e->xnext.isNull())
return DiskLoc(); // end of collection
- e = _getExtent(txn, e->xnext);
+ e = _getExtent(opCtx, e->xnext);
if (!e->firstRecord.isNull())
break;
// entire extent could be empty, keep looking
@@ -231,19 +232,19 @@ DiskLoc RecordStoreV1Base::getNextRecord(OperationContext* txn, const DiskLoc& l
return e->firstRecord;
}
-DiskLoc RecordStoreV1Base::getPrevRecord(OperationContext* txn, const DiskLoc& loc) const {
- DiskLoc prev = getPrevRecordInExtent(txn, loc);
+DiskLoc RecordStoreV1Base::getPrevRecord(OperationContext* opCtx, const DiskLoc& loc) const {
+ DiskLoc prev = getPrevRecordInExtent(opCtx, loc);
if (!prev.isNull()) {
return prev;
}
// now traverse extents
- Extent* e = _getExtent(txn, _getExtentLocForRecord(txn, loc));
+ Extent* e = _getExtent(opCtx, _getExtentLocForRecord(opCtx, loc));
while (1) {
if (e->xprev.isNull())
return DiskLoc(); // end of collection
- e = _getExtent(txn, e->xprev);
+ e = _getExtent(opCtx, e->xprev);
if (!e->firstRecord.isNull())
break;
// entire extent could be empty, keep looking
@@ -251,7 +252,7 @@ DiskLoc RecordStoreV1Base::getPrevRecord(OperationContext* txn, const DiskLoc& l
return e->lastRecord;
}
-DiskLoc RecordStoreV1Base::_findFirstSpot(OperationContext* txn,
+DiskLoc RecordStoreV1Base::_findFirstSpot(OperationContext* opCtx,
const DiskLoc& extDiskLoc,
Extent* e) {
DiskLoc emptyLoc = extDiskLoc;
@@ -266,14 +267,15 @@ DiskLoc RecordStoreV1Base::_findFirstSpot(OperationContext* txn,
ofs = newOfs;
}
- DeletedRecord* empty = txn->recoveryUnit()->writing(drec(emptyLoc));
+ DeletedRecord* empty = opCtx->recoveryUnit()->writing(drec(emptyLoc));
empty->lengthWithHeaders() = delRecLength;
empty->extentOfs() = e->myLoc.getOfs();
empty->nextDeleted().Null();
return emptyLoc;
}
-DiskLoc RecordStoreV1Base::getNextRecordInExtent(OperationContext* txn, const DiskLoc& loc) const {
+DiskLoc RecordStoreV1Base::getNextRecordInExtent(OperationContext* opCtx,
+ const DiskLoc& loc) const {
int nextOffset = recordFor(loc)->nextOfs();
if (nextOffset == DiskLoc::NullOfs)
@@ -284,7 +286,8 @@ DiskLoc RecordStoreV1Base::getNextRecordInExtent(OperationContext* txn, const Di
return result;
}
-DiskLoc RecordStoreV1Base::getPrevRecordInExtent(OperationContext* txn, const DiskLoc& loc) const {
+DiskLoc RecordStoreV1Base::getPrevRecordInExtent(OperationContext* opCtx,
+ const DiskLoc& loc) const {
int prevOffset = recordFor(loc)->prevOfs();
if (prevOffset == DiskLoc::NullOfs)
@@ -295,7 +298,7 @@ DiskLoc RecordStoreV1Base::getPrevRecordInExtent(OperationContext* txn, const Di
return result;
}
-Status RecordStoreV1Base::insertRecordsWithDocWriter(OperationContext* txn,
+Status RecordStoreV1Base::insertRecordsWithDocWriter(OperationContext* opCtx,
const DocWriter* const* docs,
size_t nDocs,
RecordId* idsOut) {
@@ -312,19 +315,19 @@ Status RecordStoreV1Base::insertRecordsWithDocWriter(OperationContext* txn,
? quantizeAllocationSpace(lenWHdr)
: lenWHdr;
- StatusWith<DiskLoc> loc = allocRecord(txn, lenToAlloc, /*enforceQuota=*/false);
+ StatusWith<DiskLoc> loc = allocRecord(opCtx, lenToAlloc, /*enforceQuota=*/false);
if (!loc.isOK())
return loc.getStatus();
MmapV1RecordHeader* r = recordFor(loc.getValue());
fassert(17319, r->lengthWithHeaders() >= lenWHdr);
- r = reinterpret_cast<MmapV1RecordHeader*>(txn->recoveryUnit()->writingPtr(r, lenWHdr));
+ r = reinterpret_cast<MmapV1RecordHeader*>(opCtx->recoveryUnit()->writingPtr(r, lenWHdr));
docs[i]->writeDocument(r->data());
- _addRecordToRecListInExtent(txn, r, loc.getValue());
+ _addRecordToRecListInExtent(opCtx, r, loc.getValue());
- _details->incrementStats(txn, r->netLength(), 1);
+ _details->incrementStats(opCtx, r->netLength(), 1);
if (idsOut)
idsOut[i] = loc.getValue().toRecordId();
@@ -335,7 +338,7 @@ Status RecordStoreV1Base::insertRecordsWithDocWriter(OperationContext* txn,
}
-StatusWith<RecordId> RecordStoreV1Base::insertRecord(OperationContext* txn,
+StatusWith<RecordId> RecordStoreV1Base::insertRecord(OperationContext* opCtx,
const char* data,
int len,
bool enforceQuota) {
@@ -347,10 +350,10 @@ StatusWith<RecordId> RecordStoreV1Base::insertRecord(OperationContext* txn,
return StatusWith<RecordId>(ErrorCodes::InvalidLength, "record has to be <= 16.5MB");
}
- return _insertRecord(txn, data, len, enforceQuota);
+ return _insertRecord(opCtx, data, len, enforceQuota);
}
-StatusWith<RecordId> RecordStoreV1Base::_insertRecord(OperationContext* txn,
+StatusWith<RecordId> RecordStoreV1Base::_insertRecord(OperationContext* opCtx,
const char* data,
int len,
bool enforceQuota) {
@@ -358,7 +361,7 @@ StatusWith<RecordId> RecordStoreV1Base::_insertRecord(OperationContext* txn,
const int lenToAlloc = shouldPadInserts() ? quantizeAllocationSpace(lenWHdr) : lenWHdr;
fassert(17208, lenToAlloc >= lenWHdr);
- StatusWith<DiskLoc> loc = allocRecord(txn, lenToAlloc, enforceQuota);
+ StatusWith<DiskLoc> loc = allocRecord(opCtx, lenToAlloc, enforceQuota);
if (!loc.isOK())
return StatusWith<RecordId>(loc.getStatus());
@@ -366,17 +369,17 @@ StatusWith<RecordId> RecordStoreV1Base::_insertRecord(OperationContext* txn,
fassert(17210, r->lengthWithHeaders() >= lenWHdr);
// copy the data
- r = reinterpret_cast<MmapV1RecordHeader*>(txn->recoveryUnit()->writingPtr(r, lenWHdr));
+ r = reinterpret_cast<MmapV1RecordHeader*>(opCtx->recoveryUnit()->writingPtr(r, lenWHdr));
memcpy(r->data(), data, len);
- _addRecordToRecListInExtent(txn, r, loc.getValue());
+ _addRecordToRecListInExtent(opCtx, r, loc.getValue());
- _details->incrementStats(txn, r->netLength(), 1);
+ _details->incrementStats(opCtx, r->netLength(), 1);
return StatusWith<RecordId>(loc.getValue().toRecordId());
}
-Status RecordStoreV1Base::updateRecord(OperationContext* txn,
+Status RecordStoreV1Base::updateRecord(OperationContext* opCtx,
const RecordId& oldLocation,
const char* data,
int dataSize,
@@ -386,13 +389,13 @@ Status RecordStoreV1Base::updateRecord(OperationContext* txn,
if (oldRecord->netLength() >= dataSize) {
// Make sure to notify other queries before we do an in-place update.
if (notifier) {
- Status callbackStatus = notifier->recordStoreGoingToUpdateInPlace(txn, oldLocation);
+ Status callbackStatus = notifier->recordStoreGoingToUpdateInPlace(opCtx, oldLocation);
if (!callbackStatus.isOK())
return callbackStatus;
}
// we fit
- memcpy(txn->recoveryUnit()->writingPtr(oldRecord->data(), dataSize), data, dataSize);
+ memcpy(opCtx->recoveryUnit()->writingPtr(oldRecord->data(), dataSize), data, dataSize);
return Status::OK();
}
@@ -407,7 +410,7 @@ bool RecordStoreV1Base::updateWithDamagesSupported() const {
}
StatusWith<RecordData> RecordStoreV1Base::updateWithDamages(
- OperationContext* txn,
+ OperationContext* opCtx,
const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
@@ -420,14 +423,15 @@ StatusWith<RecordData> RecordStoreV1Base::updateWithDamages(
const mutablebson::DamageVector::const_iterator end = damages.end();
for (; where != end; ++where) {
const char* sourcePtr = damageSource + where->sourceOffset;
- void* targetPtr = txn->recoveryUnit()->writingPtr(root + where->targetOffset, where->size);
+ void* targetPtr =
+ opCtx->recoveryUnit()->writingPtr(root + where->targetOffset, where->size);
std::memcpy(targetPtr, sourcePtr, where->size);
}
return rec->toRecordData();
}
-void RecordStoreV1Base::deleteRecord(OperationContext* txn, const RecordId& rid) {
+void RecordStoreV1Base::deleteRecord(OperationContext* opCtx, const RecordId& rid) {
const DiskLoc dl = DiskLoc::fromRecordId(rid);
MmapV1RecordHeader* todelete = recordFor(dl);
@@ -436,31 +440,31 @@ void RecordStoreV1Base::deleteRecord(OperationContext* txn, const RecordId& rid)
/* remove ourself from the record next/prev chain */
{
if (todelete->prevOfs() != DiskLoc::NullOfs) {
- DiskLoc prev = getPrevRecordInExtent(txn, dl);
+ DiskLoc prev = getPrevRecordInExtent(opCtx, dl);
MmapV1RecordHeader* prevRecord = recordFor(prev);
- txn->recoveryUnit()->writingInt(prevRecord->nextOfs()) = todelete->nextOfs();
+ opCtx->recoveryUnit()->writingInt(prevRecord->nextOfs()) = todelete->nextOfs();
}
if (todelete->nextOfs() != DiskLoc::NullOfs) {
- DiskLoc next = getNextRecord(txn, dl);
+ DiskLoc next = getNextRecord(opCtx, dl);
MmapV1RecordHeader* nextRecord = recordFor(next);
- txn->recoveryUnit()->writingInt(nextRecord->prevOfs()) = todelete->prevOfs();
+ opCtx->recoveryUnit()->writingInt(nextRecord->prevOfs()) = todelete->prevOfs();
}
}
/* remove ourself from extent pointers */
{
DiskLoc extentLoc = todelete->myExtentLoc(dl);
- Extent* e = _getExtent(txn, extentLoc);
+ Extent* e = _getExtent(opCtx, extentLoc);
if (e->firstRecord == dl) {
- txn->recoveryUnit()->writing(&e->firstRecord);
+ opCtx->recoveryUnit()->writing(&e->firstRecord);
if (todelete->nextOfs() == DiskLoc::NullOfs)
e->firstRecord.Null();
else
e->firstRecord.set(dl.a(), todelete->nextOfs());
}
if (e->lastRecord == dl) {
- txn->recoveryUnit()->writing(&e->lastRecord);
+ opCtx->recoveryUnit()->writing(&e->lastRecord);
if (todelete->prevOfs() == DiskLoc::NullOfs)
e->lastRecord.Null();
else
@@ -470,7 +474,7 @@ void RecordStoreV1Base::deleteRecord(OperationContext* txn, const RecordId& rid)
/* add to the free list */
{
- _details->incrementStats(txn, -1 * todelete->netLength(), -1);
+ _details->incrementStats(opCtx, -1 * todelete->netLength(), -1);
if (_isSystemIndexes) {
/* temp: if in system.indexes, don't reuse, and zero out: we want to be
@@ -478,76 +482,76 @@ void RecordStoreV1Base::deleteRecord(OperationContext* txn, const RecordId& rid)
to this disk location. so an incorrectly done remove would cause
a lot of problems.
*/
- memset(txn->recoveryUnit()->writingPtr(todelete, todelete->lengthWithHeaders()),
+ memset(opCtx->recoveryUnit()->writingPtr(todelete, todelete->lengthWithHeaders()),
0,
todelete->lengthWithHeaders());
} else {
// this is defensive so we can detect if we are still using a location
// that was deleted
- memset(txn->recoveryUnit()->writingPtr(todelete->data(), 4), 0xee, 4);
- addDeletedRec(txn, dl);
+ memset(opCtx->recoveryUnit()->writingPtr(todelete->data(), 4), 0xee, 4);
+ addDeletedRec(opCtx, dl);
}
}
}
-std::unique_ptr<RecordCursor> RecordStoreV1Base::getCursorForRepair(OperationContext* txn) const {
- return stdx::make_unique<RecordStoreV1RepairCursor>(txn, this);
+std::unique_ptr<RecordCursor> RecordStoreV1Base::getCursorForRepair(OperationContext* opCtx) const {
+ return stdx::make_unique<RecordStoreV1RepairCursor>(opCtx, this);
}
-void RecordStoreV1Base::_addRecordToRecListInExtent(OperationContext* txn,
+void RecordStoreV1Base::_addRecordToRecListInExtent(OperationContext* opCtx,
MmapV1RecordHeader* r,
DiskLoc loc) {
dassert(recordFor(loc) == r);
- DiskLoc extentLoc = _getExtentLocForRecord(txn, loc);
- Extent* e = _getExtent(txn, extentLoc);
+ DiskLoc extentLoc = _getExtentLocForRecord(opCtx, loc);
+ Extent* e = _getExtent(opCtx, extentLoc);
if (e->lastRecord.isNull()) {
- *txn->recoveryUnit()->writing(&e->firstRecord) = loc;
- *txn->recoveryUnit()->writing(&e->lastRecord) = loc;
+ *opCtx->recoveryUnit()->writing(&e->firstRecord) = loc;
+ *opCtx->recoveryUnit()->writing(&e->lastRecord) = loc;
r->prevOfs() = r->nextOfs() = DiskLoc::NullOfs;
} else {
MmapV1RecordHeader* oldlast = recordFor(e->lastRecord);
r->prevOfs() = e->lastRecord.getOfs();
r->nextOfs() = DiskLoc::NullOfs;
- txn->recoveryUnit()->writingInt(oldlast->nextOfs()) = loc.getOfs();
- *txn->recoveryUnit()->writing(&e->lastRecord) = loc;
+ opCtx->recoveryUnit()->writingInt(oldlast->nextOfs()) = loc.getOfs();
+ *opCtx->recoveryUnit()->writing(&e->lastRecord) = loc;
}
}
-void RecordStoreV1Base::increaseStorageSize(OperationContext* txn, int size, bool enforceQuota) {
- DiskLoc eloc = _extentManager->allocateExtent(txn, isCapped(), size, enforceQuota);
+void RecordStoreV1Base::increaseStorageSize(OperationContext* opCtx, int size, bool enforceQuota) {
+ DiskLoc eloc = _extentManager->allocateExtent(opCtx, isCapped(), size, enforceQuota);
Extent* e = _extentManager->getExtent(eloc);
invariant(e);
- *txn->recoveryUnit()->writing(&e->nsDiagnostic) = _ns;
+ *opCtx->recoveryUnit()->writing(&e->nsDiagnostic) = _ns;
- txn->recoveryUnit()->writing(&e->xnext)->Null();
- txn->recoveryUnit()->writing(&e->xprev)->Null();
- txn->recoveryUnit()->writing(&e->firstRecord)->Null();
- txn->recoveryUnit()->writing(&e->lastRecord)->Null();
+ opCtx->recoveryUnit()->writing(&e->xnext)->Null();
+ opCtx->recoveryUnit()->writing(&e->xprev)->Null();
+ opCtx->recoveryUnit()->writing(&e->firstRecord)->Null();
+ opCtx->recoveryUnit()->writing(&e->lastRecord)->Null();
- DiskLoc emptyLoc = _findFirstSpot(txn, eloc, e);
+ DiskLoc emptyLoc = _findFirstSpot(opCtx, eloc, e);
- if (_details->lastExtent(txn).isNull()) {
- invariant(_details->firstExtent(txn).isNull());
- _details->setFirstExtent(txn, eloc);
- _details->setLastExtent(txn, eloc);
- _details->setCapExtent(txn, eloc);
+ if (_details->lastExtent(opCtx).isNull()) {
+ invariant(_details->firstExtent(opCtx).isNull());
+ _details->setFirstExtent(opCtx, eloc);
+ _details->setLastExtent(opCtx, eloc);
+ _details->setCapExtent(opCtx, eloc);
invariant(e->xprev.isNull());
invariant(e->xnext.isNull());
} else {
- invariant(!_details->firstExtent(txn).isNull());
- *txn->recoveryUnit()->writing(&e->xprev) = _details->lastExtent(txn);
- *txn->recoveryUnit()->writing(
- &_extentManager->getExtent(_details->lastExtent(txn))->xnext) = eloc;
- _details->setLastExtent(txn, eloc);
+ invariant(!_details->firstExtent(opCtx).isNull());
+ *opCtx->recoveryUnit()->writing(&e->xprev) = _details->lastExtent(opCtx);
+ *opCtx->recoveryUnit()->writing(
+ &_extentManager->getExtent(_details->lastExtent(opCtx))->xnext) = eloc;
+ _details->setLastExtent(opCtx, eloc);
}
- _details->setLastExtentSize(txn, e->length);
+ _details->setLastExtentSize(opCtx, e->length);
- addDeletedRec(txn, emptyLoc);
+ addDeletedRec(opCtx, emptyLoc);
}
-Status RecordStoreV1Base::validate(OperationContext* txn,
+Status RecordStoreV1Base::validate(OperationContext* opCtx,
ValidateCmdLevel level,
ValidateAdaptor* adaptor,
ValidateResults* results,
@@ -568,22 +572,22 @@ Status RecordStoreV1Base::validate(OperationContext* txn,
output->appendNumber("datasize", _details->dataSize());
output->appendNumber("nrecords", _details->numRecords());
- output->appendNumber("lastExtentSize", _details->lastExtentSize(txn));
+ output->appendNumber("lastExtentSize", _details->lastExtentSize(opCtx));
- if (_details->firstExtent(txn).isNull())
+ if (_details->firstExtent(opCtx).isNull())
output->append("firstExtent", "null");
else
- output->append(
- "firstExtent",
- str::stream() << _details->firstExtent(txn).toString() << " ns:"
- << _getExtent(txn, _details->firstExtent(txn))->nsDiagnostic.toString());
- if (_details->lastExtent(txn).isNull())
+ output->append("firstExtent",
+ str::stream() << _details->firstExtent(opCtx).toString() << " ns:"
+ << _getExtent(opCtx, _details->firstExtent(opCtx))
+ ->nsDiagnostic.toString());
+ if (_details->lastExtent(opCtx).isNull())
output->append("lastExtent", "null");
else
- output->append(
- "lastExtent",
- str::stream() << _details->lastExtent(txn).toString() << " ns:"
- << _getExtent(txn, _details->lastExtent(txn))->nsDiagnostic.toString());
+ output->append("lastExtent",
+ str::stream() << _details->lastExtent(opCtx).toString() << " ns:"
+ << _getExtent(opCtx, _details->lastExtent(opCtx))
+ ->nsDiagnostic.toString());
// 22222222222222222222222222
{ // validate extent basics
@@ -591,14 +595,14 @@ Status RecordStoreV1Base::validate(OperationContext* txn,
int extentCount = 0;
DiskLoc extentDiskLoc;
try {
- if (!_details->firstExtent(txn).isNull()) {
- _getExtent(txn, _details->firstExtent(txn))->assertOk();
- _getExtent(txn, _details->lastExtent(txn))->assertOk();
+ if (!_details->firstExtent(opCtx).isNull()) {
+ _getExtent(opCtx, _details->firstExtent(opCtx))->assertOk();
+ _getExtent(opCtx, _details->lastExtent(opCtx))->assertOk();
}
- extentDiskLoc = _details->firstExtent(txn);
+ extentDiskLoc = _details->firstExtent(opCtx);
while (!extentDiskLoc.isNull()) {
- Extent* thisExtent = _getExtent(txn, extentDiskLoc);
+ Extent* thisExtent = _getExtent(opCtx, extentDiskLoc);
if (level == kValidateFull) {
extentData << thisExtent->dump();
}
@@ -608,24 +612,24 @@ Status RecordStoreV1Base::validate(OperationContext* txn,
DiskLoc nextDiskLoc = thisExtent->xnext;
if (extentCount > 0 && !nextDiskLoc.isNull() &&
- _getExtent(txn, nextDiskLoc)->xprev != extentDiskLoc) {
+ _getExtent(opCtx, nextDiskLoc)->xprev != extentDiskLoc) {
StringBuilder sb;
- sb << "'xprev' pointer " << _getExtent(txn, nextDiskLoc)->xprev.toString()
+ sb << "'xprev' pointer " << _getExtent(opCtx, nextDiskLoc)->xprev.toString()
<< " in extent " << nextDiskLoc.toString() << " does not point to extent "
<< extentDiskLoc.toString();
results->errors.push_back(sb.str());
results->valid = false;
}
- if (nextDiskLoc.isNull() && extentDiskLoc != _details->lastExtent(txn)) {
+ if (nextDiskLoc.isNull() && extentDiskLoc != _details->lastExtent(opCtx)) {
StringBuilder sb;
- sb << "'lastExtent' pointer " << _details->lastExtent(txn).toString()
+ sb << "'lastExtent' pointer " << _details->lastExtent(opCtx).toString()
<< " does not point to last extent in list " << extentDiskLoc.toString();
results->errors.push_back(sb.str());
results->valid = false;
}
extentDiskLoc = nextDiskLoc;
extentCount++;
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
}
} catch (const DBException& e) {
StringBuilder sb;
@@ -644,31 +648,31 @@ Status RecordStoreV1Base::validate(OperationContext* txn,
// 333333333333333333333333333
bool testingLastExtent = false;
try {
- DiskLoc firstExtentLoc = _details->firstExtent(txn);
+ DiskLoc firstExtentLoc = _details->firstExtent(opCtx);
if (firstExtentLoc.isNull()) {
// this is ok
} else {
- output->append("firstExtentDetails", _getExtent(txn, firstExtentLoc)->dump());
- if (!_getExtent(txn, firstExtentLoc)->xprev.isNull()) {
+ output->append("firstExtentDetails", _getExtent(opCtx, firstExtentLoc)->dump());
+ if (!_getExtent(opCtx, firstExtentLoc)->xprev.isNull()) {
StringBuilder sb;
sb << "'xprev' pointer in 'firstExtent' "
- << _details->firstExtent(txn).toString() << " is "
- << _getExtent(txn, firstExtentLoc)->xprev.toString() << ", should be null";
+ << _details->firstExtent(opCtx).toString() << " is "
+ << _getExtent(opCtx, firstExtentLoc)->xprev.toString() << ", should be null";
results->errors.push_back(sb.str());
results->valid = false;
}
}
testingLastExtent = true;
- DiskLoc lastExtentLoc = _details->lastExtent(txn);
+ DiskLoc lastExtentLoc = _details->lastExtent(opCtx);
if (lastExtentLoc.isNull()) {
// this is ok
} else {
if (firstExtentLoc != lastExtentLoc) {
- output->append("lastExtentDetails", _getExtent(txn, lastExtentLoc)->dump());
- if (!_getExtent(txn, lastExtentLoc)->xnext.isNull()) {
+ output->append("lastExtentDetails", _getExtent(opCtx, lastExtentLoc)->dump());
+ if (!_getExtent(opCtx, lastExtentLoc)->xnext.isNull()) {
StringBuilder sb;
sb << "'xnext' pointer in 'lastExtent' " << lastExtentLoc.toString()
- << " is " << _getExtent(txn, lastExtentLoc)->xnext.toString()
+ << " is " << _getExtent(opCtx, lastExtentLoc)->xnext.toString()
<< ", should be null";
results->errors.push_back(sb.str());
results->valid = false;
@@ -696,7 +700,7 @@ Status RecordStoreV1Base::validate(OperationContext* txn,
int outOfOrder = 0;
DiskLoc dl_last;
- auto cursor = getCursor(txn);
+ auto cursor = getCursor(opCtx);
while (auto record = cursor->next()) {
const auto dl = DiskLoc::fromRecordId(record->id);
n++;
@@ -800,7 +804,7 @@ Status RecordStoreV1Base::validate(OperationContext* txn,
delSize += d->lengthWithHeaders();
loc = d->nextDeleted();
k++;
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
}
delBucketSizes << k;
} catch (...) {
@@ -829,10 +833,10 @@ Status RecordStoreV1Base::validate(OperationContext* txn,
return Status::OK();
}
-void RecordStoreV1Base::appendCustomStats(OperationContext* txn,
+void RecordStoreV1Base::appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* result,
double scale) const {
- result->append("lastExtentSize", _details->lastExtentSize(txn) / scale);
+ result->append("lastExtentSize", _details->lastExtentSize(opCtx) / scale);
result->append("paddingFactor", 1.0); // hard coded
result->append("paddingFactorNote",
"paddingFactor is unused and unmaintained in 3.0. It "
@@ -841,7 +845,8 @@ void RecordStoreV1Base::appendCustomStats(OperationContext* txn,
result->appendBool("capped", isCapped());
if (isCapped()) {
result->appendNumber("max", _details->maxCappedDocs());
- result->appendNumber("maxSize", static_cast<long long>(storageSize(txn, NULL, 0) / scale));
+ result->appendNumber("maxSize",
+ static_cast<long long>(storageSize(opCtx, NULL, 0) / scale));
}
}
@@ -853,13 +858,13 @@ struct touch_location {
};
}
-Status RecordStoreV1Base::touch(OperationContext* txn, BSONObjBuilder* output) const {
+Status RecordStoreV1Base::touch(OperationContext* opCtx, BSONObjBuilder* output) const {
Timer t;
std::vector<touch_location> ranges;
{
- DiskLoc nextLoc = _details->firstExtent(txn);
- Extent* ext = nextLoc.isNull() ? NULL : _getExtent(txn, nextLoc);
+ DiskLoc nextLoc = _details->firstExtent(opCtx);
+ Extent* ext = nextLoc.isNull() ? NULL : _getExtent(opCtx, nextLoc);
while (ext) {
touch_location tl;
tl.root = reinterpret_cast<const char*>(ext);
@@ -870,20 +875,20 @@ Status RecordStoreV1Base::touch(OperationContext* txn, BSONObjBuilder* output) c
if (nextLoc.isNull())
ext = NULL;
else
- ext = _getExtent(txn, nextLoc);
+ ext = _getExtent(opCtx, nextLoc);
}
}
std::string progress_msg = "touch " + ns() + " extents";
- stdx::unique_lock<Client> lk(*txn->getClient());
+ stdx::unique_lock<Client> lk(*opCtx->getClient());
ProgressMeterHolder pm(
- *txn->setMessage_inlock(progress_msg.c_str(), "Touch Progress", ranges.size()));
+ *opCtx->setMessage_inlock(progress_msg.c_str(), "Touch Progress", ranges.size()));
lk.unlock();
for (std::vector<touch_location>::iterator it = ranges.begin(); it != ranges.end(); ++it) {
touch_pages(it->root, it->length);
pm.hit();
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
}
pm.finished();
@@ -900,7 +905,7 @@ boost::optional<Record> RecordStoreV1Base::IntraExtentIterator::next() {
return {};
auto out = _curr.toRecordId();
advance();
- return {{out, _rs->dataFor(_txn, out)}};
+ return {{out, _rs->dataFor(_opCtx, out)}};
}
void RecordStoreV1Base::IntraExtentIterator::advance() {
@@ -912,13 +917,13 @@ void RecordStoreV1Base::IntraExtentIterator::advance() {
_curr = (nextOfs == DiskLoc::NullOfs ? DiskLoc() : DiskLoc(_curr.a(), nextOfs));
}
-void RecordStoreV1Base::IntraExtentIterator::invalidate(OperationContext* txn,
+void RecordStoreV1Base::IntraExtentIterator::invalidate(OperationContext* opCtx,
const RecordId& rid) {
if (rid == _curr.toRecordId()) {
const DiskLoc origLoc = _curr;
// Undo the advance on rollback, as the deletion that forced it "never happened".
- txn->recoveryUnit()->onRollback([this, origLoc]() { this->_curr = origLoc; });
+ opCtx->recoveryUnit()->onRollback([this, origLoc]() { this->_curr = origLoc; });
advance();
}
}
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_base.h b/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
index ae7f5dd656e..6dadf5487da 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
@@ -50,10 +50,10 @@ public:
virtual ~RecordStoreV1MetaData() {}
virtual const DiskLoc& capExtent() const = 0;
- virtual void setCapExtent(OperationContext* txn, const DiskLoc& loc) = 0;
+ virtual void setCapExtent(OperationContext* opCtx, const DiskLoc& loc) = 0;
virtual const DiskLoc& capFirstNewRecord() const = 0;
- virtual void setCapFirstNewRecord(OperationContext* txn, const DiskLoc& loc) = 0;
+ virtual void setCapFirstNewRecord(OperationContext* opCtx, const DiskLoc& loc) = 0;
bool capLooped() const {
return capFirstNewRecord().isValid();
@@ -62,36 +62,36 @@ public:
virtual long long dataSize() const = 0;
virtual long long numRecords() const = 0;
- virtual void incrementStats(OperationContext* txn,
+ virtual void incrementStats(OperationContext* opCtx,
long long dataSizeIncrement,
long long numRecordsIncrement) = 0;
- virtual void setStats(OperationContext* txn, long long dataSize, long long numRecords) = 0;
+ virtual void setStats(OperationContext* opCtx, long long dataSize, long long numRecords) = 0;
virtual DiskLoc deletedListEntry(int bucket) const = 0;
- virtual void setDeletedListEntry(OperationContext* txn, int bucket, const DiskLoc& loc) = 0;
+ virtual void setDeletedListEntry(OperationContext* opCtx, int bucket, const DiskLoc& loc) = 0;
virtual DiskLoc deletedListLegacyGrabBag() const = 0;
- virtual void setDeletedListLegacyGrabBag(OperationContext* txn, const DiskLoc& loc) = 0;
+ virtual void setDeletedListLegacyGrabBag(OperationContext* opCtx, const DiskLoc& loc) = 0;
- virtual void orphanDeletedList(OperationContext* txn) = 0;
+ virtual void orphanDeletedList(OperationContext* opCtx) = 0;
- virtual const DiskLoc& firstExtent(OperationContext* txn) const = 0;
- virtual void setFirstExtent(OperationContext* txn, const DiskLoc& loc) = 0;
+ virtual const DiskLoc& firstExtent(OperationContext* opCtx) const = 0;
+ virtual void setFirstExtent(OperationContext* opCtx, const DiskLoc& loc) = 0;
- virtual const DiskLoc& lastExtent(OperationContext* txn) const = 0;
- virtual void setLastExtent(OperationContext* txn, const DiskLoc& loc) = 0;
+ virtual const DiskLoc& lastExtent(OperationContext* opCtx) const = 0;
+ virtual void setLastExtent(OperationContext* opCtx, const DiskLoc& loc) = 0;
virtual bool isCapped() const = 0;
virtual bool isUserFlagSet(int flag) const = 0;
virtual int userFlags() const = 0;
- virtual bool setUserFlag(OperationContext* txn, int flag) = 0;
- virtual bool clearUserFlag(OperationContext* txn, int flag) = 0;
- virtual bool replaceUserFlags(OperationContext* txn, int flags) = 0;
+ virtual bool setUserFlag(OperationContext* opCtx, int flag) = 0;
+ virtual bool clearUserFlag(OperationContext* opCtx, int flag) = 0;
+ virtual bool replaceUserFlags(OperationContext* opCtx, int flags) = 0;
- virtual int lastExtentSize(OperationContext* txn) const = 0;
- virtual void setLastExtentSize(OperationContext* txn, int newMax) = 0;
+ virtual int lastExtentSize(OperationContext* opCtx) const = 0;
+ virtual void setLastExtentSize(OperationContext* opCtx, int newMax) = 0;
virtual long long maxCappedDocs() const = 0;
};
@@ -172,34 +172,34 @@ public:
virtual ~RecordStoreV1Base();
- virtual long long dataSize(OperationContext* txn) const {
+ virtual long long dataSize(OperationContext* opCtx) const {
return _details->dataSize();
}
- virtual long long numRecords(OperationContext* txn) const {
+ virtual long long numRecords(OperationContext* opCtx) const {
return _details->numRecords();
}
- virtual int64_t storageSize(OperationContext* txn,
+ virtual int64_t storageSize(OperationContext* opCtx,
BSONObjBuilder* extraInfo = NULL,
int level = 0) const;
- virtual RecordData dataFor(OperationContext* txn, const RecordId& loc) const;
+ virtual RecordData dataFor(OperationContext* opCtx, const RecordId& loc) const;
- virtual bool findRecord(OperationContext* txn, const RecordId& loc, RecordData* rd) const;
+ virtual bool findRecord(OperationContext* opCtx, const RecordId& loc, RecordData* rd) const;
- void deleteRecord(OperationContext* txn, const RecordId& dl);
+ void deleteRecord(OperationContext* opCtx, const RecordId& dl);
- StatusWith<RecordId> insertRecord(OperationContext* txn,
+ StatusWith<RecordId> insertRecord(OperationContext* opCtx,
const char* data,
int len,
bool enforceQuota);
- Status insertRecordsWithDocWriter(OperationContext* txn,
+ Status insertRecordsWithDocWriter(OperationContext* opCtx,
const DocWriter* const* docs,
size_t nDocs,
RecordId* idsOut) final;
- virtual Status updateRecord(OperationContext* txn,
+ virtual Status updateRecord(OperationContext* opCtx,
const RecordId& oldLocation,
const char* data,
int len,
@@ -208,27 +208,27 @@ public:
virtual bool updateWithDamagesSupported() const;
- virtual StatusWith<RecordData> updateWithDamages(OperationContext* txn,
+ virtual StatusWith<RecordData> updateWithDamages(OperationContext* opCtx,
const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages);
- virtual std::unique_ptr<RecordCursor> getCursorForRepair(OperationContext* txn) const;
+ virtual std::unique_ptr<RecordCursor> getCursorForRepair(OperationContext* opCtx) const;
- void increaseStorageSize(OperationContext* txn, int size, bool enforceQuota);
+ void increaseStorageSize(OperationContext* opCtx, int size, bool enforceQuota);
- virtual Status validate(OperationContext* txn,
+ virtual Status validate(OperationContext* opCtx,
ValidateCmdLevel level,
ValidateAdaptor* adaptor,
ValidateResults* results,
BSONObjBuilder* output);
- virtual void appendCustomStats(OperationContext* txn,
+ virtual void appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* result,
double scale) const;
- virtual Status touch(OperationContext* txn, BSONObjBuilder* output) const;
+ virtual Status touch(OperationContext* opCtx, BSONObjBuilder* output) const;
const RecordStoreV1MetaData* details() const {
return _details.get();
@@ -237,13 +237,13 @@ public:
// This keeps track of cursors saved during yielding, for invalidation purposes.
SavedCursorRegistry savedCursors;
- DiskLoc getExtentLocForRecord(OperationContext* txn, const DiskLoc& loc) const;
+ DiskLoc getExtentLocForRecord(OperationContext* opCtx, const DiskLoc& loc) const;
- DiskLoc getNextRecord(OperationContext* txn, const DiskLoc& loc) const;
- DiskLoc getPrevRecord(OperationContext* txn, const DiskLoc& loc) const;
+ DiskLoc getNextRecord(OperationContext* opCtx, const DiskLoc& loc) const;
+ DiskLoc getPrevRecord(OperationContext* opCtx, const DiskLoc& loc) const;
- DiskLoc getNextRecordInExtent(OperationContext* txn, const DiskLoc& loc) const;
- DiskLoc getPrevRecordInExtent(OperationContext* txn, const DiskLoc& loc) const;
+ DiskLoc getNextRecordInExtent(OperationContext* opCtx, const DiskLoc& loc) const;
+ DiskLoc getPrevRecordInExtent(OperationContext* opCtx, const DiskLoc& loc) const;
/**
* Quantize 'minSize' to the nearest allocation size.
@@ -255,9 +255,9 @@ public:
/* return which "deleted bucket" for this size object */
static int bucket(int size);
- void waitForAllEarlierOplogWritesToBeVisible(OperationContext* txn) const override {}
+ void waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx) const override {}
- virtual void updateStatsAfterRepair(OperationContext* txn,
+ virtual void updateStatsAfterRepair(OperationContext* opCtx,
long long numRecords,
long long dataSize) {
invariant(false); // MMAPv1 has its own repair which doesn't call this.
@@ -272,43 +272,43 @@ protected:
virtual bool shouldPadInserts() const = 0;
- virtual StatusWith<DiskLoc> allocRecord(OperationContext* txn,
+ virtual StatusWith<DiskLoc> allocRecord(OperationContext* opCtx,
int lengthWithHeaders,
bool enforceQuota) = 0;
// TODO: document, remove, what have you
- virtual void addDeletedRec(OperationContext* txn, const DiskLoc& dloc) = 0;
+ virtual void addDeletedRec(OperationContext* opCtx, const DiskLoc& dloc) = 0;
// TODO: another sad one
virtual DeletedRecord* drec(const DiskLoc& loc) const;
// just a wrapper for _extentManager->getExtent( loc );
- Extent* _getExtent(OperationContext* txn, const DiskLoc& loc) const;
+ Extent* _getExtent(OperationContext* opCtx, const DiskLoc& loc) const;
- DiskLoc _getExtentLocForRecord(OperationContext* txn, const DiskLoc& loc) const;
+ DiskLoc _getExtentLocForRecord(OperationContext* opCtx, const DiskLoc& loc) const;
- DiskLoc _getNextRecord(OperationContext* txn, const DiskLoc& loc) const;
- DiskLoc _getPrevRecord(OperationContext* txn, const DiskLoc& loc) const;
+ DiskLoc _getNextRecord(OperationContext* opCtx, const DiskLoc& loc) const;
+ DiskLoc _getPrevRecord(OperationContext* opCtx, const DiskLoc& loc) const;
- DiskLoc _getNextRecordInExtent(OperationContext* txn, const DiskLoc& loc) const;
- DiskLoc _getPrevRecordInExtent(OperationContext* txn, const DiskLoc& loc) const;
+ DiskLoc _getNextRecordInExtent(OperationContext* opCtx, const DiskLoc& loc) const;
+ DiskLoc _getPrevRecordInExtent(OperationContext* opCtx, const DiskLoc& loc) const;
/**
* finds the first suitable DiskLoc for data
* will return the DiskLoc of a newly created DeletedRecord
*/
- DiskLoc _findFirstSpot(OperationContext* txn, const DiskLoc& extDiskLoc, Extent* e);
+ DiskLoc _findFirstSpot(OperationContext* opCtx, const DiskLoc& extDiskLoc, Extent* e);
/** add a record to the end of the linked list chain within this extent.
require: you must have already declared write intent for the record header.
*/
- void _addRecordToRecListInExtent(OperationContext* txn, MmapV1RecordHeader* r, DiskLoc loc);
+ void _addRecordToRecListInExtent(OperationContext* opCtx, MmapV1RecordHeader* r, DiskLoc loc);
/**
* internal
* doesn't check inputs or change padding
*/
- StatusWith<RecordId> _insertRecord(OperationContext* txn,
+ StatusWith<RecordId> _insertRecord(OperationContext* opCtx,
const char* data,
int len,
bool enforceQuota);
@@ -327,23 +327,23 @@ protected:
*/
class RecordStoreV1Base::IntraExtentIterator final : public RecordCursor {
public:
- IntraExtentIterator(OperationContext* txn,
+ IntraExtentIterator(OperationContext* opCtx,
DiskLoc start,
const RecordStoreV1Base* rs,
bool forward = true)
- : _txn(txn), _curr(start), _rs(rs), _forward(forward) {}
+ : _opCtx(opCtx), _curr(start), _rs(rs), _forward(forward) {}
boost::optional<Record> next() final;
- void invalidate(OperationContext* txn, const RecordId& dl) final;
+ void invalidate(OperationContext* opCtx, const RecordId& dl) final;
void save() final {}
bool restore() final {
return true;
}
void detachFromOperationContext() final {
- _txn = nullptr;
+ _opCtx = nullptr;
}
- void reattachToOperationContext(OperationContext* txn) final {
- _txn = txn;
+ void reattachToOperationContext(OperationContext* opCtx) final {
+ _opCtx = opCtx;
}
std::unique_ptr<RecordFetcher> fetcherForNext() const final;
@@ -354,7 +354,7 @@ private:
void advance();
- OperationContext* _txn;
+ OperationContext* _opCtx;
DiskLoc _curr;
const RecordStoreV1Base* _rs;
bool _forward;
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
index a4c62cf2d63..47b65e7dd16 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
@@ -68,14 +68,14 @@ using std::endl;
using std::hex;
using std::vector;
-CappedRecordStoreV1::CappedRecordStoreV1(OperationContext* txn,
+CappedRecordStoreV1::CappedRecordStoreV1(OperationContext* opCtx,
CappedCallback* collection,
StringData ns,
RecordStoreV1MetaData* details,
ExtentManager* em,
bool isSystemIndexes)
: RecordStoreV1Base(ns, details, em, isSystemIndexes), _cappedCallback(collection) {
- DiskLoc extentLoc = details->firstExtent(txn);
+ DiskLoc extentLoc = details->firstExtent(opCtx);
while (!extentLoc.isNull()) {
_extentAdvice.push_back(_extentManager->cacheHint(extentLoc, ExtentManager::Sequential));
Extent* extent = em->getExtent(extentLoc);
@@ -83,12 +83,12 @@ CappedRecordStoreV1::CappedRecordStoreV1(OperationContext* txn,
}
// this is for VERY VERY old versions of capped collections
- cappedCheckMigrate(txn);
+ cappedCheckMigrate(opCtx);
}
CappedRecordStoreV1::~CappedRecordStoreV1() {}
-StatusWith<DiskLoc> CappedRecordStoreV1::allocRecord(OperationContext* txn,
+StatusWith<DiskLoc> CappedRecordStoreV1::allocRecord(OperationContext* opCtx,
int lenToAlloc,
bool enforceQuota) {
{
@@ -100,12 +100,12 @@ StatusWith<DiskLoc> CappedRecordStoreV1::allocRecord(OperationContext* txn,
// the extent check is a way to try and improve performance
// since we have to iterate all the extents (for now) to get
// storage size
- if (lenToAlloc > storageSize(txn)) {
+ if (lenToAlloc > storageSize(opCtx)) {
return StatusWith<DiskLoc>(
ErrorCodes::DocTooLargeForCapped,
mongoutils::str::stream() << "document is larger than capped size " << lenToAlloc
<< " > "
- << storageSize(txn),
+ << storageSize(opCtx),
16328);
}
}
@@ -114,7 +114,7 @@ StatusWith<DiskLoc> CappedRecordStoreV1::allocRecord(OperationContext* txn,
// signal done allocating new extents.
if (!cappedLastDelRecLastExtent().isValid())
- setLastDelRecLastExtent(txn, DiskLoc());
+ setLastDelRecLastExtent(opCtx, DiskLoc());
invariant(lenToAlloc < 400000000);
int passes = 0;
@@ -128,17 +128,17 @@ StatusWith<DiskLoc> CappedRecordStoreV1::allocRecord(OperationContext* txn,
DiskLoc firstEmptyExtent; // This prevents us from infinite looping.
while (1) {
if (_details->numRecords() < _details->maxCappedDocs()) {
- loc = __capAlloc(txn, lenToAlloc);
+ loc = __capAlloc(opCtx, lenToAlloc);
if (!loc.isNull())
break;
}
// If on first iteration through extents, don't delete anything.
if (!_details->capFirstNewRecord().isValid()) {
- advanceCapExtent(txn, _ns);
+ advanceCapExtent(opCtx, _ns);
- if (_details->capExtent() != _details->firstExtent(txn))
- _details->setCapFirstNewRecord(txn, DiskLoc().setInvalid());
+ if (_details->capExtent() != _details->firstExtent(opCtx))
+ _details->setCapFirstNewRecord(opCtx, DiskLoc().setInvalid());
// else signal done with first iteration through extents.
continue;
}
@@ -147,37 +147,37 @@ StatusWith<DiskLoc> CappedRecordStoreV1::allocRecord(OperationContext* txn,
theCapExtent()->firstRecord == _details->capFirstNewRecord()) {
// We've deleted all records that were allocated on the previous
// iteration through this extent.
- advanceCapExtent(txn, _ns);
+ advanceCapExtent(opCtx, _ns);
continue;
}
if (theCapExtent()->firstRecord.isNull()) {
if (firstEmptyExtent.isNull())
firstEmptyExtent = _details->capExtent();
- advanceCapExtent(txn, _ns);
+ advanceCapExtent(opCtx, _ns);
if (firstEmptyExtent == _details->capExtent()) {
// All records have been deleted but there is still no room for this record.
// Nothing we can do but fail.
- _maybeComplain(txn, lenToAlloc);
+ _maybeComplain(opCtx, lenToAlloc);
return StatusWith<DiskLoc>(ErrorCodes::DocTooLargeForCapped,
str::stream()
<< "document doesn't fit in capped collection."
<< " size: "
<< lenToAlloc
<< " storageSize:"
- << storageSize(txn),
+ << storageSize(opCtx),
28575);
}
continue;
}
const RecordId fr = theCapExtent()->firstRecord.toRecordId();
- Status status = _cappedCallback->aboutToDeleteCapped(txn, fr, dataFor(txn, fr));
+ Status status = _cappedCallback->aboutToDeleteCapped(opCtx, fr, dataFor(opCtx, fr));
if (!status.isOK())
return StatusWith<DiskLoc>(status);
- deleteRecord(txn, fr);
+ deleteRecord(opCtx, fr);
- _compact(txn);
+ _compact(opCtx);
if ((++passes % 5000) == 0) {
StringBuilder sb;
log() << "passes = " << passes << " in CappedRecordStoreV1::allocRecord:"
@@ -191,7 +191,7 @@ StatusWith<DiskLoc> CappedRecordStoreV1::allocRecord(OperationContext* txn,
// Remember first record allocated on this iteration through capExtent.
if (_details->capFirstNewRecord().isValid() && _details->capFirstNewRecord().isNull())
- _details->setCapFirstNewRecord(txn, loc);
+ _details->setCapFirstNewRecord(opCtx, loc);
}
invariant(!loc.isNull());
@@ -208,53 +208,55 @@ StatusWith<DiskLoc> CappedRecordStoreV1::allocRecord(OperationContext* txn,
int left = regionlen - lenToAlloc;
/* split off some for further use. */
- txn->recoveryUnit()->writingInt(r->lengthWithHeaders()) = lenToAlloc;
+ opCtx->recoveryUnit()->writingInt(r->lengthWithHeaders()) = lenToAlloc;
DiskLoc newDelLoc = loc;
newDelLoc.inc(lenToAlloc);
DeletedRecord* newDel = drec(newDelLoc);
- DeletedRecord* newDelW = txn->recoveryUnit()->writing(newDel);
+ DeletedRecord* newDelW = opCtx->recoveryUnit()->writing(newDel);
newDelW->extentOfs() = r->extentOfs();
newDelW->lengthWithHeaders() = left;
newDelW->nextDeleted().Null();
- addDeletedRec(txn, newDelLoc);
+ addDeletedRec(opCtx, newDelLoc);
return StatusWith<DiskLoc>(loc);
}
-Status CappedRecordStoreV1::truncate(OperationContext* txn) {
- setLastDelRecLastExtent(txn, DiskLoc());
- setListOfAllDeletedRecords(txn, DiskLoc());
+Status CappedRecordStoreV1::truncate(OperationContext* opCtx) {
+ setLastDelRecLastExtent(opCtx, DiskLoc());
+ setListOfAllDeletedRecords(opCtx, DiskLoc());
// preserve firstExtent/lastExtent
- _details->setCapExtent(txn, _details->firstExtent(txn));
- _details->setStats(txn, 0, 0);
+ _details->setCapExtent(opCtx, _details->firstExtent(opCtx));
+ _details->setStats(opCtx, 0, 0);
// preserve lastExtentSize
// nIndexes preserve 0
// capped preserve true
// max preserve
// paddingFactor is unused
- _details->setCapFirstNewRecord(txn, DiskLoc().setInvalid());
- setLastDelRecLastExtent(txn, DiskLoc().setInvalid());
+ _details->setCapFirstNewRecord(opCtx, DiskLoc().setInvalid());
+ setLastDelRecLastExtent(opCtx, DiskLoc().setInvalid());
// dataFileVersion preserve
// indexFileVersion preserve
// Reset all existing extents and recreate the deleted list.
Extent* ext;
- for (DiskLoc extLoc = _details->firstExtent(txn); !extLoc.isNull(); extLoc = ext->xnext) {
+ for (DiskLoc extLoc = _details->firstExtent(opCtx); !extLoc.isNull(); extLoc = ext->xnext) {
ext = _extentManager->getExtent(extLoc);
- txn->recoveryUnit()->writing(&ext->firstRecord)->Null();
- txn->recoveryUnit()->writing(&ext->lastRecord)->Null();
+ opCtx->recoveryUnit()->writing(&ext->firstRecord)->Null();
+ opCtx->recoveryUnit()->writing(&ext->lastRecord)->Null();
- addDeletedRec(txn, _findFirstSpot(txn, extLoc, ext));
+ addDeletedRec(opCtx, _findFirstSpot(opCtx, extLoc, ext));
}
return Status::OK();
}
-void CappedRecordStoreV1::cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive) {
- cappedTruncateAfter(txn, _ns.c_str(), DiskLoc::fromRecordId(end), inclusive);
+void CappedRecordStoreV1::cappedTruncateAfter(OperationContext* opCtx,
+ RecordId end,
+ bool inclusive) {
+ cappedTruncateAfter(opCtx, _ns.c_str(), DiskLoc::fromRecordId(end), inclusive);
}
/* combine adjacent deleted records *for the current extent* of the capped collection
@@ -262,7 +264,7 @@ void CappedRecordStoreV1::cappedTruncateAfter(OperationContext* txn, RecordId en
this is O(n^2) but we call it for capped tables where typically n==1 or 2!
(or 3...there will be a little unused sliver at the end of the extent.)
*/
-void CappedRecordStoreV1::_compact(OperationContext* txn) {
+void CappedRecordStoreV1::_compact(OperationContext* opCtx) {
DDD("CappedRecordStoreV1::compact enter");
vector<DiskLoc> drecs;
@@ -274,7 +276,7 @@ void CappedRecordStoreV1::_compact(OperationContext* txn) {
drecs.push_back(i);
}
- setFirstDeletedInCurExtent(txn, i);
+ setFirstDeletedInCurExtent(opCtx, i);
std::sort(drecs.begin(), drecs.end());
DDD("\t drecs.size(): " << drecs.size());
@@ -286,24 +288,24 @@ void CappedRecordStoreV1::_compact(OperationContext* txn) {
j++;
if (j == drecs.end()) {
DDD("\t compact adddelrec");
- addDeletedRec(txn, a);
+ addDeletedRec(opCtx, a);
break;
}
DiskLoc b = *j;
while (a.a() == b.a() && a.getOfs() + drec(a)->lengthWithHeaders() == b.getOfs()) {
// a & b are adjacent. merge.
- txn->recoveryUnit()->writingInt(drec(a)->lengthWithHeaders()) +=
+ opCtx->recoveryUnit()->writingInt(drec(a)->lengthWithHeaders()) +=
drec(b)->lengthWithHeaders();
j++;
if (j == drecs.end()) {
DDD("\t compact adddelrec2");
- addDeletedRec(txn, a);
+ addDeletedRec(opCtx, a);
return;
}
b = *j;
}
DDD("\t compact adddelrec3");
- addDeletedRec(txn, a);
+ addDeletedRec(opCtx, a);
a = b;
}
}
@@ -315,18 +317,18 @@ DiskLoc CappedRecordStoreV1::cappedFirstDeletedInCurExtent() const {
return drec(cappedLastDelRecLastExtent())->nextDeleted();
}
-void CappedRecordStoreV1::setFirstDeletedInCurExtent(OperationContext* txn, const DiskLoc& loc) {
+void CappedRecordStoreV1::setFirstDeletedInCurExtent(OperationContext* opCtx, const DiskLoc& loc) {
if (cappedLastDelRecLastExtent().isNull())
- setListOfAllDeletedRecords(txn, loc);
+ setListOfAllDeletedRecords(opCtx, loc);
else
- *txn->recoveryUnit()->writing(&drec(cappedLastDelRecLastExtent())->nextDeleted()) = loc;
+ *opCtx->recoveryUnit()->writing(&drec(cappedLastDelRecLastExtent())->nextDeleted()) = loc;
}
-void CappedRecordStoreV1::cappedCheckMigrate(OperationContext* txn) {
+void CappedRecordStoreV1::cappedCheckMigrate(OperationContext* opCtx) {
// migrate old RecordStoreV1MetaData format
if (_details->capExtent().a() == 0 && _details->capExtent().getOfs() == 0) {
- WriteUnitOfWork wunit(txn);
- _details->setCapFirstNewRecord(txn, DiskLoc().setInvalid());
+ WriteUnitOfWork wunit(opCtx);
+ _details->setCapFirstNewRecord(opCtx, DiskLoc().setInvalid());
// put all the DeletedRecords in cappedListOfAllDeletedRecords()
for (int i = 1; i < Buckets; ++i) {
DiskLoc first = _details->deletedListEntry(i);
@@ -335,15 +337,15 @@ void CappedRecordStoreV1::cappedCheckMigrate(OperationContext* txn) {
DiskLoc last = first;
for (; !drec(last)->nextDeleted().isNull(); last = drec(last)->nextDeleted())
;
- *txn->recoveryUnit()->writing(&drec(last)->nextDeleted()) =
+ *opCtx->recoveryUnit()->writing(&drec(last)->nextDeleted()) =
cappedListOfAllDeletedRecords();
- setListOfAllDeletedRecords(txn, first);
- _details->setDeletedListEntry(txn, i, DiskLoc());
+ setListOfAllDeletedRecords(opCtx, first);
+ _details->setDeletedListEntry(opCtx, i, DiskLoc());
}
// NOTE cappedLastDelRecLastExtent() set to DiskLoc() in above
// Last, in case we're killed before getting here
- _details->setCapExtent(txn, _details->firstExtent(txn));
+ _details->setCapExtent(opCtx, _details->firstExtent(opCtx));
wunit.commit();
}
}
@@ -370,29 +372,30 @@ bool CappedRecordStoreV1::nextIsInCapExtent(const DiskLoc& dl) const {
return inCapExtent(next);
}
-void CappedRecordStoreV1::advanceCapExtent(OperationContext* txn, StringData ns) {
+void CappedRecordStoreV1::advanceCapExtent(OperationContext* opCtx, StringData ns) {
// We want cappedLastDelRecLastExtent() to be the last DeletedRecord of the prev cap extent
// (or DiskLoc() if new capExtent == firstExtent)
- if (_details->capExtent() == _details->lastExtent(txn))
- setLastDelRecLastExtent(txn, DiskLoc());
+ if (_details->capExtent() == _details->lastExtent(opCtx))
+ setLastDelRecLastExtent(opCtx, DiskLoc());
else {
DiskLoc i = cappedFirstDeletedInCurExtent();
for (; !i.isNull() && nextIsInCapExtent(i); i = drec(i)->nextDeleted())
;
- setLastDelRecLastExtent(txn, i);
+ setLastDelRecLastExtent(opCtx, i);
}
- _details->setCapExtent(
- txn, theCapExtent()->xnext.isNull() ? _details->firstExtent(txn) : theCapExtent()->xnext);
+ _details->setCapExtent(opCtx,
+ theCapExtent()->xnext.isNull() ? _details->firstExtent(opCtx)
+ : theCapExtent()->xnext);
/* this isn't true if a collection has been renamed...that is ok just used for diagnostics */
// dassert( theCapExtent()->ns == ns );
theCapExtent()->assertOk();
- _details->setCapFirstNewRecord(txn, DiskLoc());
+ _details->setCapFirstNewRecord(opCtx, DiskLoc());
}
-DiskLoc CappedRecordStoreV1::__capAlloc(OperationContext* txn, int len) {
+DiskLoc CappedRecordStoreV1::__capAlloc(OperationContext* opCtx, int len) {
DiskLoc prev = cappedLastDelRecLastExtent();
DiskLoc i = cappedFirstDeletedInCurExtent();
DiskLoc ret;
@@ -408,10 +411,10 @@ DiskLoc CappedRecordStoreV1::__capAlloc(OperationContext* txn, int len) {
/* unlink ourself from the deleted list */
if (!ret.isNull()) {
if (prev.isNull())
- setListOfAllDeletedRecords(txn, drec(ret)->nextDeleted());
+ setListOfAllDeletedRecords(opCtx, drec(ret)->nextDeleted());
else
- *txn->recoveryUnit()->writing(&drec(prev)->nextDeleted()) = drec(ret)->nextDeleted();
- *txn->recoveryUnit()->writing(&drec(ret)->nextDeleted()) =
+ *opCtx->recoveryUnit()->writing(&drec(prev)->nextDeleted()) = drec(ret)->nextDeleted();
+ *opCtx->recoveryUnit()->writing(&drec(ret)->nextDeleted()) =
DiskLoc().setInvalid(); // defensive.
invariant(drec(ret)->extentOfs() < ret.getOfs());
}
@@ -419,12 +422,12 @@ DiskLoc CappedRecordStoreV1::__capAlloc(OperationContext* txn, int len) {
return ret;
}
-void CappedRecordStoreV1::cappedTruncateLastDelUpdate(OperationContext* txn) {
- if (_details->capExtent() == _details->firstExtent(txn)) {
+void CappedRecordStoreV1::cappedTruncateLastDelUpdate(OperationContext* opCtx) {
+ if (_details->capExtent() == _details->firstExtent(opCtx)) {
// Only one extent of the collection is in use, so there
// is no deleted record in a previous extent, so nullify
// cappedLastDelRecLastExtent().
- setLastDelRecLastExtent(txn, DiskLoc());
+ setLastDelRecLastExtent(opCtx, DiskLoc());
} else {
// Scan through all deleted records in the collection
// until the last deleted record for the extent prior
@@ -439,11 +442,11 @@ void CappedRecordStoreV1::cappedTruncateLastDelUpdate(OperationContext* txn) {
// record. (We expect that there will be deleted records in the new
// capExtent as well.)
invariant(!drec(i)->nextDeleted().isNull());
- setLastDelRecLastExtent(txn, i);
+ setLastDelRecLastExtent(opCtx, i);
}
}
-void CappedRecordStoreV1::cappedTruncateAfter(OperationContext* txn,
+void CappedRecordStoreV1::cappedTruncateAfter(OperationContext* opCtx,
const char* ns,
DiskLoc end,
bool inclusive) {
@@ -476,13 +479,13 @@ void CappedRecordStoreV1::cappedTruncateAfter(OperationContext* txn,
// this case instead of asserting.
uassert(13415, "emptying the collection is not allowed", _details->numRecords() > 1);
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
// Delete the newest record, and coalesce the new deleted
// record with existing deleted records.
- Status status = _cappedCallback->aboutToDeleteCapped(txn, currId, dataFor(txn, currId));
+ Status status = _cappedCallback->aboutToDeleteCapped(opCtx, currId, dataFor(opCtx, currId));
uassertStatusOK(status);
- deleteRecord(txn, currId);
- _compact(txn);
+ deleteRecord(opCtx, currId);
+ _compact(opCtx);
// This is the case where we have not yet had to remove any
// documents to make room for other documents, and we are allocating
@@ -497,11 +500,11 @@ void CappedRecordStoreV1::cappedTruncateAfter(OperationContext* txn,
// NOTE Because we didn't delete the last document, and
// capLooped() is false, capExtent is not the first extent
// so xprev will be nonnull.
- _details->setCapExtent(txn, theCapExtent()->xprev);
+ _details->setCapExtent(opCtx, theCapExtent()->xprev);
theCapExtent()->assertOk();
// update cappedLastDelRecLastExtent()
- cappedTruncateLastDelUpdate(txn);
+ cappedTruncateLastDelUpdate(opCtx);
}
wunit.commit();
continue;
@@ -524,20 +527,20 @@ void CappedRecordStoreV1::cappedTruncateAfter(OperationContext* txn,
DiskLoc newCapExtent = _details->capExtent();
do {
// Find the previous extent, looping if necessary.
- newCapExtent = (newCapExtent == _details->firstExtent(txn))
- ? _details->lastExtent(txn)
+ newCapExtent = (newCapExtent == _details->firstExtent(opCtx))
+ ? _details->lastExtent(opCtx)
: _extentManager->getExtent(newCapExtent)->xprev;
_extentManager->getExtent(newCapExtent)->assertOk();
} while (_extentManager->getExtent(newCapExtent)->firstRecord.isNull());
- _details->setCapExtent(txn, newCapExtent);
+ _details->setCapExtent(opCtx, newCapExtent);
// Place all documents in the new capExtent on the fresh side
// of the capExtent by setting capFirstNewRecord to the first
// document in the new capExtent.
- _details->setCapFirstNewRecord(txn, theCapExtent()->firstRecord);
+ _details->setCapFirstNewRecord(opCtx, theCapExtent()->firstRecord);
// update cappedLastDelRecLastExtent()
- cappedTruncateLastDelUpdate(txn);
+ cappedTruncateLastDelUpdate(opCtx);
}
wunit.commit();
@@ -548,62 +551,63 @@ DiskLoc CappedRecordStoreV1::cappedListOfAllDeletedRecords() const {
return _details->deletedListEntry(0);
}
-void CappedRecordStoreV1::setListOfAllDeletedRecords(OperationContext* txn, const DiskLoc& loc) {
- return _details->setDeletedListEntry(txn, 0, loc);
+void CappedRecordStoreV1::setListOfAllDeletedRecords(OperationContext* opCtx, const DiskLoc& loc) {
+ return _details->setDeletedListEntry(opCtx, 0, loc);
}
DiskLoc CappedRecordStoreV1::cappedLastDelRecLastExtent() const {
return _details->deletedListEntry(1);
}
-void CappedRecordStoreV1::setLastDelRecLastExtent(OperationContext* txn, const DiskLoc& loc) {
- return _details->setDeletedListEntry(txn, 1, loc);
+void CappedRecordStoreV1::setLastDelRecLastExtent(OperationContext* opCtx, const DiskLoc& loc) {
+ return _details->setDeletedListEntry(opCtx, 1, loc);
}
Extent* CappedRecordStoreV1::theCapExtent() const {
return _extentManager->getExtent(_details->capExtent());
}
-void CappedRecordStoreV1::addDeletedRec(OperationContext* txn, const DiskLoc& dloc) {
- DeletedRecord* d = txn->recoveryUnit()->writing(drec(dloc));
+void CappedRecordStoreV1::addDeletedRec(OperationContext* opCtx, const DiskLoc& dloc) {
+ DeletedRecord* d = opCtx->recoveryUnit()->writing(drec(dloc));
if (!cappedLastDelRecLastExtent().isValid()) {
// Initial extent allocation. Insert at end.
d->nextDeleted() = DiskLoc();
if (cappedListOfAllDeletedRecords().isNull())
- setListOfAllDeletedRecords(txn, dloc);
+ setListOfAllDeletedRecords(opCtx, dloc);
else {
DiskLoc i = cappedListOfAllDeletedRecords();
for (; !drec(i)->nextDeleted().isNull(); i = drec(i)->nextDeleted())
;
- *txn->recoveryUnit()->writing(&drec(i)->nextDeleted()) = dloc;
+ *opCtx->recoveryUnit()->writing(&drec(i)->nextDeleted()) = dloc;
}
} else {
d->nextDeleted() = cappedFirstDeletedInCurExtent();
- setFirstDeletedInCurExtent(txn, dloc);
+ setFirstDeletedInCurExtent(opCtx, dloc);
// always _compact() after this so order doesn't matter
}
}
-std::unique_ptr<SeekableRecordCursor> CappedRecordStoreV1::getCursor(OperationContext* txn,
+std::unique_ptr<SeekableRecordCursor> CappedRecordStoreV1::getCursor(OperationContext* opCtx,
bool forward) const {
- return stdx::make_unique<CappedRecordStoreV1Iterator>(txn, this, forward);
+ return stdx::make_unique<CappedRecordStoreV1Iterator>(opCtx, this, forward);
}
vector<std::unique_ptr<RecordCursor>> CappedRecordStoreV1::getManyCursors(
- OperationContext* txn) const {
+ OperationContext* opCtx) const {
vector<std::unique_ptr<RecordCursor>> cursors;
if (!_details->capLooped()) {
// if we haven't looped yet, just spit out all extents (same as non-capped impl)
const Extent* ext;
- for (DiskLoc extLoc = details()->firstExtent(txn); !extLoc.isNull(); extLoc = ext->xnext) {
- ext = _getExtent(txn, extLoc);
+ for (DiskLoc extLoc = details()->firstExtent(opCtx); !extLoc.isNull();
+ extLoc = ext->xnext) {
+ ext = _getExtent(opCtx, extLoc);
if (ext->firstRecord.isNull())
continue;
cursors.push_back(stdx::make_unique<RecordStoreV1Base::IntraExtentIterator>(
- txn, ext->firstRecord, this));
+ opCtx, ext->firstRecord, this));
}
} else {
// if we've looped we need to iterate the extents, starting and ending with the
@@ -615,40 +619,40 @@ vector<std::unique_ptr<RecordCursor>> CappedRecordStoreV1::getManyCursors(
// First do the "old" portion of capExtent if there is any
DiskLoc extLoc = capExtent;
{
- const Extent* ext = _getExtent(txn, extLoc);
+ const Extent* ext = _getExtent(opCtx, extLoc);
if (ext->firstRecord != details()->capFirstNewRecord()) {
// this means there is old data in capExtent
cursors.push_back(stdx::make_unique<RecordStoreV1Base::IntraExtentIterator>(
- txn, ext->firstRecord, this));
+ opCtx, ext->firstRecord, this));
}
- extLoc = ext->xnext.isNull() ? details()->firstExtent(txn) : ext->xnext;
+ extLoc = ext->xnext.isNull() ? details()->firstExtent(opCtx) : ext->xnext;
}
// Next handle all the other extents
while (extLoc != capExtent) {
- const Extent* ext = _getExtent(txn, extLoc);
+ const Extent* ext = _getExtent(opCtx, extLoc);
cursors.push_back(stdx::make_unique<RecordStoreV1Base::IntraExtentIterator>(
- txn, ext->firstRecord, this));
+ opCtx, ext->firstRecord, this));
- extLoc = ext->xnext.isNull() ? details()->firstExtent(txn) : ext->xnext;
+ extLoc = ext->xnext.isNull() ? details()->firstExtent(opCtx) : ext->xnext;
}
// Finally handle the "new" data in the capExtent
cursors.push_back(stdx::make_unique<RecordStoreV1Base::IntraExtentIterator>(
- txn, details()->capFirstNewRecord(), this));
+ opCtx, details()->capFirstNewRecord(), this));
}
return cursors;
}
-void CappedRecordStoreV1::_maybeComplain(OperationContext* txn, int len) const {
+void CappedRecordStoreV1::_maybeComplain(OperationContext* opCtx, int len) const {
RARELY {
std::stringstream buf;
buf << "couldn't make room for record len: " << len << " in capped ns " << _ns << '\n';
- buf << "numRecords: " << numRecords(txn) << '\n';
+ buf << "numRecords: " << numRecords(opCtx) << '\n';
int i = 0;
- for (DiskLoc e = _details->firstExtent(txn); !e.isNull();
+ for (DiskLoc e = _details->firstExtent(opCtx); !e.isNull();
e = _extentManager->getExtent(e)->xnext, ++i) {
buf << " Extent " << i;
if (e == _details->capExtent())
@@ -666,12 +670,13 @@ void CappedRecordStoreV1::_maybeComplain(OperationContext* txn, int len) const {
warning() << buf.str();
// assume it is unusually large record; if not, something is broken
- fassert(17438, len * 5 > _details->lastExtentSize(txn));
+ fassert(17438, len * 5 > _details->lastExtentSize(opCtx));
}
}
-DiskLoc CappedRecordStoreV1::firstRecord(OperationContext* txn, const DiskLoc& startExtent) const {
- for (DiskLoc i = startExtent.isNull() ? _details->firstExtent(txn) : startExtent; !i.isNull();
+DiskLoc CappedRecordStoreV1::firstRecord(OperationContext* opCtx,
+ const DiskLoc& startExtent) const {
+ for (DiskLoc i = startExtent.isNull() ? _details->firstExtent(opCtx) : startExtent; !i.isNull();
i = _extentManager->getExtent(i)->xnext) {
Extent* e = _extentManager->getExtent(i);
@@ -681,8 +686,8 @@ DiskLoc CappedRecordStoreV1::firstRecord(OperationContext* txn, const DiskLoc& s
return DiskLoc();
}
-DiskLoc CappedRecordStoreV1::lastRecord(OperationContext* txn, const DiskLoc& startExtent) const {
- for (DiskLoc i = startExtent.isNull() ? _details->lastExtent(txn) : startExtent; !i.isNull();
+DiskLoc CappedRecordStoreV1::lastRecord(OperationContext* opCtx, const DiskLoc& startExtent) const {
+ for (DiskLoc i = startExtent.isNull() ? _details->lastExtent(opCtx) : startExtent; !i.isNull();
i = _extentManager->getExtent(i)->xprev) {
Extent* e = _extentManager->getExtent(i);
if (!e->lastRecord.isNull())
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped.h b/src/mongo/db/storage/mmap_v1/record_store_v1_capped.h
index 3fc64a76cd9..d74fc7c65ea 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped.h
@@ -40,7 +40,7 @@ namespace mongo {
class CappedRecordStoreV1 final : public RecordStoreV1Base {
public:
- CappedRecordStoreV1(OperationContext* txn,
+ CappedRecordStoreV1(OperationContext* opCtx,
CappedCallback* collection,
StringData ns,
RecordStoreV1MetaData* details,
@@ -53,7 +53,7 @@ public:
return "CappedRecordStoreV1";
}
- Status truncate(OperationContext* txn) final;
+ Status truncate(OperationContext* opCtx) final;
/**
* Truncate documents newer than the document at 'end' from the capped
@@ -61,17 +61,17 @@ public:
* function. An assertion will be thrown if that is attempted.
* @param inclusive - Truncate 'end' as well iff true
*/
- void cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive) final;
+ void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) final;
- std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* txn,
+ std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* opCtx,
bool forward) const final;
- std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* txn) const final;
+ std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* opCtx) const final;
// Start from firstExtent by default.
- DiskLoc firstRecord(OperationContext* txn, const DiskLoc& startExtent = DiskLoc()) const;
+ DiskLoc firstRecord(OperationContext* opCtx, const DiskLoc& startExtent = DiskLoc()) const;
// Start from lastExtent by default.
- DiskLoc lastRecord(OperationContext* txn, const DiskLoc& startExtent = DiskLoc()) const;
+ DiskLoc lastRecord(OperationContext* opCtx, const DiskLoc& startExtent = DiskLoc()) const;
protected:
bool isCapped() const final {
@@ -85,28 +85,28 @@ protected:
_cappedCallback = cb;
}
- StatusWith<DiskLoc> allocRecord(OperationContext* txn,
+ StatusWith<DiskLoc> allocRecord(OperationContext* opCtx,
int lengthWithHeaders,
bool enforceQuota) final;
- void addDeletedRec(OperationContext* txn, const DiskLoc& dloc) final;
+ void addDeletedRec(OperationContext* opCtx, const DiskLoc& dloc) final;
private:
// -- start copy from cap.cpp --
- void _compact(OperationContext* txn);
+ void _compact(OperationContext* opCtx);
DiskLoc cappedFirstDeletedInCurExtent() const;
- void setFirstDeletedInCurExtent(OperationContext* txn, const DiskLoc& loc);
- void cappedCheckMigrate(OperationContext* txn);
- DiskLoc __capAlloc(OperationContext* txn, int len);
+ void setFirstDeletedInCurExtent(OperationContext* opCtx, const DiskLoc& loc);
+ void cappedCheckMigrate(OperationContext* opCtx);
+ DiskLoc __capAlloc(OperationContext* opCtx, int len);
bool inCapExtent(const DiskLoc& dl) const;
DiskLoc cappedListOfAllDeletedRecords() const;
DiskLoc cappedLastDelRecLastExtent() const;
- void setListOfAllDeletedRecords(OperationContext* txn, const DiskLoc& loc);
- void setLastDelRecLastExtent(OperationContext* txn, const DiskLoc& loc);
+ void setListOfAllDeletedRecords(OperationContext* opCtx, const DiskLoc& loc);
+ void setLastDelRecLastExtent(OperationContext* opCtx, const DiskLoc& loc);
Extent* theCapExtent() const;
bool nextIsInCapExtent(const DiskLoc& dl) const;
- void advanceCapExtent(OperationContext* txn, StringData ns);
- void cappedTruncateLastDelUpdate(OperationContext* txn);
+ void advanceCapExtent(OperationContext* opCtx, StringData ns);
+ void cappedTruncateLastDelUpdate(OperationContext* opCtx);
/**
* Truncate documents newer than the document at 'end' from the capped
@@ -114,9 +114,9 @@ private:
* function. An assertion will be thrown if that is attempted.
* @param inclusive - Truncate 'end' as well iff true
*/
- void cappedTruncateAfter(OperationContext* txn, const char* ns, DiskLoc end, bool inclusive);
+ void cappedTruncateAfter(OperationContext* opCtx, const char* ns, DiskLoc end, bool inclusive);
- void _maybeComplain(OperationContext* txn, int len) const;
+ void _maybeComplain(OperationContext* opCtx, int len) const;
// -- end copy from cap.cpp --
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.cpp
index cdd8363d949..20324ffe5ee 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.cpp
@@ -39,10 +39,10 @@ namespace mongo {
//
// Capped collection traversal
//
-CappedRecordStoreV1Iterator::CappedRecordStoreV1Iterator(OperationContext* txn,
+CappedRecordStoreV1Iterator::CappedRecordStoreV1Iterator(OperationContext* opCtx,
const CappedRecordStoreV1* collection,
bool forward)
- : _txn(txn), _recordStore(collection), _forward(forward) {
+ : _opCtx(opCtx), _recordStore(collection), _forward(forward) {
const RecordStoreV1MetaData* nsd = _recordStore->details();
// If a start position isn't specified, we fill one out from the start of the
@@ -51,7 +51,7 @@ CappedRecordStoreV1Iterator::CappedRecordStoreV1Iterator(OperationContext* txn,
// Going forwards.
if (!nsd->capLooped()) {
// If our capped collection doesn't loop around, the first record is easy.
- _curr = collection->firstRecord(_txn);
+ _curr = collection->firstRecord(_opCtx);
} else {
// Our capped collection has "looped' around.
// Copied verbatim from ForwardCappedCursor::init.
@@ -66,7 +66,7 @@ CappedRecordStoreV1Iterator::CappedRecordStoreV1Iterator(OperationContext* txn,
// Going backwards
if (!nsd->capLooped()) {
// Start at the end.
- _curr = collection->lastRecord(_txn);
+ _curr = collection->lastRecord(_opCtx);
} else {
_curr = _getExtent(nsd->capExtent())->lastRecord;
}
@@ -78,15 +78,15 @@ boost::optional<Record> CappedRecordStoreV1Iterator::next() {
return {};
auto toReturn = _curr.toRecordId();
_curr = getNextCapped(_curr);
- return {{toReturn, _recordStore->RecordStore::dataFor(_txn, toReturn)}};
+ return {{toReturn, _recordStore->RecordStore::dataFor(_opCtx, toReturn)}};
}
boost::optional<Record> CappedRecordStoreV1Iterator::seekExact(const RecordId& id) {
_curr = getNextCapped(DiskLoc::fromRecordId(id));
- return {{id, _recordStore->RecordStore::dataFor(_txn, id)}};
+ return {{id, _recordStore->RecordStore::dataFor(_opCtx, id)}};
}
-void CappedRecordStoreV1Iterator::invalidate(OperationContext* txn, const RecordId& id) {
+void CappedRecordStoreV1Iterator::invalidate(OperationContext* opCtx, const RecordId& id) {
const DiskLoc dl = DiskLoc::fromRecordId(id);
if (dl == _curr) {
// We *could* move to the next thing, since there is actually a next
@@ -179,7 +179,7 @@ DiskLoc CappedRecordStoreV1Iterator::nextLoop(const DiskLoc& prev) {
if (!next.isNull()) {
return next;
}
- return _recordStore->firstRecord(_txn);
+ return _recordStore->firstRecord(_opCtx);
}
DiskLoc CappedRecordStoreV1Iterator::prevLoop(const DiskLoc& curr) {
@@ -188,7 +188,7 @@ DiskLoc CappedRecordStoreV1Iterator::prevLoop(const DiskLoc& curr) {
if (!prev.isNull()) {
return prev;
}
- return _recordStore->lastRecord(_txn);
+ return _recordStore->lastRecord(_opCtx);
}
@@ -197,11 +197,11 @@ Extent* CappedRecordStoreV1Iterator::_getExtent(const DiskLoc& loc) {
}
DiskLoc CappedRecordStoreV1Iterator::_getNextRecord(const DiskLoc& loc) {
- return _recordStore->getNextRecord(_txn, loc);
+ return _recordStore->getNextRecord(_opCtx, loc);
}
DiskLoc CappedRecordStoreV1Iterator::_getPrevRecord(const DiskLoc& loc) {
- return _recordStore->getPrevRecord(_txn, loc);
+ return _recordStore->getPrevRecord(_opCtx, loc);
}
std::unique_ptr<RecordFetcher> CappedRecordStoreV1Iterator::fetcherForNext() const {
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.h b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.h
index 275c78cae38..08065109c3f 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.h
@@ -43,7 +43,7 @@ struct Extent;
*/
class CappedRecordStoreV1Iterator final : public SeekableRecordCursor {
public:
- CappedRecordStoreV1Iterator(OperationContext* txn,
+ CappedRecordStoreV1Iterator(OperationContext* opCtx,
const CappedRecordStoreV1* collection,
bool forward);
@@ -52,12 +52,12 @@ public:
void save() final;
bool restore() final;
void detachFromOperationContext() final {
- _txn = nullptr;
+ _opCtx = nullptr;
}
- void reattachToOperationContext(OperationContext* txn) final {
- _txn = txn;
+ void reattachToOperationContext(OperationContext* opCtx) final {
+ _opCtx = opCtx;
}
- void invalidate(OperationContext* txn, const RecordId& dl) final;
+ void invalidate(OperationContext* opCtx, const RecordId& dl) final;
std::unique_ptr<RecordFetcher> fetcherForNext() const final;
std::unique_ptr<RecordFetcher> fetcherForId(const RecordId& id) const final;
@@ -80,7 +80,7 @@ private:
DiskLoc _getPrevRecord(const DiskLoc& loc);
// transactional context for read locks. Not owned by us
- OperationContext* _txn;
+ OperationContext* _opCtx;
// The collection we're iterating over.
const CappedRecordStoreV1* const _recordStore;
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp
index 2bde7396e44..de02abcf76b 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp
@@ -51,7 +51,7 @@ char zeros[20 * 1024 * 1024] = {};
class DummyCappedCallback : public CappedCallback {
public:
- Status aboutToDeleteCapped(OperationContext* txn, const RecordId& loc, RecordData data) {
+ Status aboutToDeleteCapped(OperationContext* opCtx, const RecordId& loc, RecordData data) {
deleted.push_back(DiskLoc::fromRecordId(loc));
return Status::OK();
}
@@ -62,35 +62,35 @@ public:
};
void simpleInsertTest(const char* buf, int size) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
DummyCappedCallback cb;
string myns = "test.simple1";
- CappedRecordStoreV1 rs(&txn, &cb, myns, md, &em, false);
+ CappedRecordStoreV1 rs(&opCtx, &cb, myns, md, &em, false);
- rs.increaseStorageSize(&txn, 1024, false);
+ rs.increaseStorageSize(&opCtx, 1024, false);
- ASSERT_NOT_OK(rs.insertRecord(&txn, buf, 3, 1000).getStatus());
+ ASSERT_NOT_OK(rs.insertRecord(&opCtx, buf, 3, 1000).getStatus());
- rs.insertRecord(&txn, buf, size, 10000);
+ rs.insertRecord(&opCtx, buf, size, 10000);
{
BSONObjBuilder b;
- int64_t storageSize = rs.storageSize(&txn, &b);
+ int64_t storageSize = rs.storageSize(&opCtx, &b);
BSONObj obj = b.obj();
ASSERT_EQUALS(1, obj["numExtents"].numberInt());
ASSERT_EQUALS(storageSize, em.quantizeExtentSize(1024));
}
for (int i = 0; i < 1000; i++) {
- ASSERT_OK(rs.insertRecord(&txn, buf, size, 10000).getStatus());
+ ASSERT_OK(rs.insertRecord(&opCtx, buf, size, 10000).getStatus());
}
long long start = md->numRecords();
for (int i = 0; i < 1000; i++) {
- ASSERT_OK(rs.insertRecord(&txn, buf, size, 10000).getStatus());
+ ASSERT_OK(rs.insertRecord(&opCtx, buf, size, 10000).getStatus());
}
ASSERT_EQUALS(start, md->numRecords());
ASSERT_GREATER_THAN(start, 100);
@@ -105,37 +105,37 @@ TEST(CappedRecordStoreV1, SimpleInsertSize8) {
}
TEST(CappedRecordStoreV1, EmptySingleExtent) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
DummyCappedCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ CappedRecordStoreV1 rs(&opCtx, &cb, "test.foo", md, &em, false);
{
LocAndSize records[] = {{}};
LocAndSize drecs[] = {{DiskLoc(0, 1000), 1000}, {}};
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid());
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc().setInvalid());
+ initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 100}, {}};
LocAndSize drecs[] = {{DiskLoc(0, 1100), 900}, {}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc().setInvalid()); // unlooped
}
}
TEST(CappedRecordStoreV1, FirstLoopWithSingleExtentExactSize) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
DummyCappedCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ CappedRecordStoreV1 rs(&opCtx, &cb, "test.foo", md, &em, false);
{
LocAndSize records[] = {{DiskLoc(0, 1000), 100},
@@ -145,12 +145,12 @@ TEST(CappedRecordStoreV1, FirstLoopWithSingleExtentExactSize) {
{DiskLoc(0, 1400), 100},
{}};
LocAndSize drecs[] = {{DiskLoc(0, 1500), 50}, {}};
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid()); // unlooped
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc().setInvalid()); // unlooped
+ initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
{
LocAndSize recs[] = {{DiskLoc(0, 1200), 100}, // first old record
@@ -162,18 +162,18 @@ TEST(CappedRecordStoreV1, FirstLoopWithSingleExtentExactSize) {
{DiskLoc(0, 1100), 100}, // gap after newest record XXX this is probably a bug
{DiskLoc(0, 1500), 50}, // gap at end of extent
{}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
}
}
TEST(CappedRecordStoreV1, NonFirstLoopWithSingleExtentExactSize) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
DummyCappedCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ CappedRecordStoreV1 rs(&opCtx, &cb, "test.foo", md, &em, false);
{
LocAndSize records[] = {{DiskLoc(0, 1000), 100},
@@ -183,12 +183,12 @@ TEST(CappedRecordStoreV1, NonFirstLoopWithSingleExtentExactSize) {
{DiskLoc(0, 1400), 100},
{}};
LocAndSize drecs[] = {{DiskLoc(0, 1500), 50}, {}};
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc(0, 1000));
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc(0, 1000));
+ initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
{
LocAndSize recs[] = {{DiskLoc(0, 1200), 100}, // first old record
@@ -200,7 +200,7 @@ TEST(CappedRecordStoreV1, NonFirstLoopWithSingleExtentExactSize) {
{DiskLoc(0, 1100), 100}, // gap after newest record XXX this is probably a bug
{DiskLoc(0, 1500), 50}, // gap at end of extent
{}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
}
@@ -210,11 +210,11 @@ TEST(CappedRecordStoreV1, NonFirstLoopWithSingleExtentExactSize) {
* Current code always tries to leave 24 bytes to create a DeletedRecord.
*/
TEST(CappedRecordStoreV1, WillLoopWithout24SpareBytes) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
DummyCappedCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ CappedRecordStoreV1 rs(&opCtx, &cb, "test.foo", md, &em, false);
{
LocAndSize records[] = {{DiskLoc(0, 1000), 100},
@@ -224,12 +224,12 @@ TEST(CappedRecordStoreV1, WillLoopWithout24SpareBytes) {
{DiskLoc(0, 1400), 100},
{}};
LocAndSize drecs[] = {{DiskLoc(0, 1500), 123}, {}};
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc(0, 1000));
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc(0, 1000));
+ initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
{
LocAndSize recs[] = {{DiskLoc(0, 1200), 100}, // first old record
@@ -240,18 +240,18 @@ TEST(CappedRecordStoreV1, WillLoopWithout24SpareBytes) {
LocAndSize drecs[] = {{DiskLoc(0, 1100), 100}, // gap after newest record
{DiskLoc(0, 1500), 123}, // gap at end of extent
{}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
}
}
TEST(CappedRecordStoreV1, WontLoopWith24SpareBytes) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
DummyCappedCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ CappedRecordStoreV1 rs(&opCtx, &cb, "test.foo", md, &em, false);
{
LocAndSize records[] = {{DiskLoc(0, 1000), 100},
@@ -261,12 +261,12 @@ TEST(CappedRecordStoreV1, WontLoopWith24SpareBytes) {
{DiskLoc(0, 1400), 100},
{}};
LocAndSize drecs[] = {{DiskLoc(0, 1500), 124}, {}};
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc(0, 1000));
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc(0, 1000));
+ initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 100},
@@ -278,30 +278,30 @@ TEST(CappedRecordStoreV1, WontLoopWith24SpareBytes) {
{}};
LocAndSize drecs[] = {{DiskLoc(0, 1600), 24}, // gap at end of extent
{}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
}
}
TEST(CappedRecordStoreV1, MoveToSecondExtentUnLooped) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
DummyCappedCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ CappedRecordStoreV1 rs(&opCtx, &cb, "test.foo", md, &em, false);
{
// Two extents, each with 1000 bytes.
LocAndSize records[] = {
{DiskLoc(0, 1000), 500}, {DiskLoc(0, 1500), 300}, {DiskLoc(0, 1800), 100}, {}};
LocAndSize drecs[] = {{DiskLoc(0, 1900), 100}, {DiskLoc(1, 1000), 1000}, {}};
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid());
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc().setInvalid());
+ initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 500},
@@ -311,18 +311,18 @@ TEST(CappedRecordStoreV1, MoveToSecondExtentUnLooped) {
{DiskLoc(1, 1000), 100},
{}};
LocAndSize drecs[] = {{DiskLoc(0, 1900), 100}, {DiskLoc(1, 1100), 900}, {}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(1, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc().setInvalid()); // unlooped
}
}
TEST(CappedRecordStoreV1, MoveToSecondExtentLooped) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
DummyCappedCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ CappedRecordStoreV1 rs(&opCtx, &cb, "test.foo", md, &em, false);
{
// Two extents, each with 1000 bytes.
@@ -334,12 +334,12 @@ TEST(CappedRecordStoreV1, MoveToSecondExtentLooped) {
{DiskLoc(1, 1300), 600},
{}};
LocAndSize drecs[] = {{DiskLoc(0, 1900), 100}, {DiskLoc(1, 1900), 100}, {}};
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc(0, 1000));
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc(0, 1000));
+ initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&txn, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 500},
@@ -350,7 +350,7 @@ TEST(CappedRecordStoreV1, MoveToSecondExtentLooped) {
{}};
LocAndSize drecs[] = {
{DiskLoc(0, 1800), 200}, {DiskLoc(1, 1200), 100}, {DiskLoc(1, 1900), 100}, {}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(1, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(1, 1000));
}
@@ -358,43 +358,43 @@ TEST(CappedRecordStoreV1, MoveToSecondExtentLooped) {
// Larger than storageSize (fails early)
TEST(CappedRecordStoreV1, OversizedRecordHuge) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
DummyCappedCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ CappedRecordStoreV1 rs(&opCtx, &cb, "test.foo", md, &em, false);
{
LocAndSize records[] = {{}};
LocAndSize drecs[] = {{DiskLoc(0, 1000), 1000}, {}};
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid());
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc().setInvalid());
+ initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- StatusWith<RecordId> status = rs.insertRecord(&txn, zeros, 16000, false);
+ StatusWith<RecordId> status = rs.insertRecord(&opCtx, zeros, 16000, false);
ASSERT_EQUALS(status.getStatus(), ErrorCodes::DocTooLargeForCapped);
ASSERT_EQUALS(status.getStatus().location(), 16328);
}
// Smaller than storageSize, but larger than usable space (fails late)
TEST(CappedRecordStoreV1, OversizedRecordMedium) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
DummyCappedCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ CappedRecordStoreV1 rs(&opCtx, &cb, "test.foo", md, &em, false);
{
LocAndSize records[] = {{}};
LocAndSize drecs[] = {{DiskLoc(0, 1000), 1000}, {}};
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid());
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc().setInvalid());
+ initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
StatusWith<RecordId> status =
- rs.insertRecord(&txn, zeros, 1004 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 1004 - MmapV1RecordHeader::HeaderSize, false);
ASSERT_EQUALS(status.getStatus(), ErrorCodes::DocTooLargeForCapped);
ASSERT_EQUALS(status.getStatus().location(), 28575);
}
@@ -409,28 +409,28 @@ TEST(CappedRecordStoreV1, OversizedRecordMedium) {
* This is a minimal example that shows the current allocator laying out records out-of-order.
*/
TEST(CappedRecordStoreV1Scrambler, Minimal) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
DummyCappedCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ CappedRecordStoreV1 rs(&opCtx, &cb, "test.foo", md, &em, false);
{
// Starting with a single empty 1000 byte extent.
LocAndSize records[] = {{}};
LocAndSize drecs[] = {{DiskLoc(0, 1000), 1000}, {}};
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid()); // unlooped
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc().setInvalid()); // unlooped
+ initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&txn, zeros, 500 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 300 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 500 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 300 - MmapV1RecordHeader::HeaderSize, false);
rs.insertRecord(
- &txn, zeros, 400 - MmapV1RecordHeader::HeaderSize, false); // won't fit at end so wraps
- rs.insertRecord(&txn, zeros, 120 - MmapV1RecordHeader::HeaderSize, false); // fits at end
+ &opCtx, zeros, 400 - MmapV1RecordHeader::HeaderSize, false); // won't fit at end so wraps
+ rs.insertRecord(&opCtx, zeros, 120 - MmapV1RecordHeader::HeaderSize, false); // fits at end
rs.insertRecord(
- &txn, zeros, 60 - MmapV1RecordHeader::HeaderSize, false); // fits in earlier hole
+ &opCtx, zeros, 60 - MmapV1RecordHeader::HeaderSize, false); // fits in earlier hole
{
LocAndSize recs[] = {{DiskLoc(0, 1500), 300}, // 2nd insert
@@ -439,7 +439,7 @@ TEST(CappedRecordStoreV1Scrambler, Minimal) {
{DiskLoc(0, 1400), 60}, // 5th
{}};
LocAndSize drecs[] = {{DiskLoc(0, 1460), 40}, {DiskLoc(0, 1920), 80}, {}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
}
@@ -450,51 +450,51 @@ TEST(CappedRecordStoreV1Scrambler, Minimal) {
* that leaves 4 deleted records in a single extent.
*/
TEST(CappedRecordStoreV1Scrambler, FourDeletedRecordsInSingleExtent) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
DummyCappedCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ CappedRecordStoreV1 rs(&opCtx, &cb, "test.foo", md, &em, false);
{
// Starting with a single empty 1000 byte extent.
LocAndSize records[] = {{}};
LocAndSize drecs[] = {{DiskLoc(0, 1000), 1000}, {}};
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid()); // unlooped
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc().setInvalid()); // unlooped
+ initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
// This list of sizes was empirically generated to achieve this outcome. Don't think too
// much about them.
- rs.insertRecord(&txn, zeros, 500 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 300 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 304 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 76 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 76 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 56 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 104 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 60 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 60 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 146 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 146 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 40 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 40 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 36 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 60 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 64 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 500 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 300 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 304 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 76 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 76 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 56 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 104 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 60 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 60 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 146 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 146 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 40 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 40 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 36 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 60 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 64 - MmapV1RecordHeader::HeaderSize, false);
{
LocAndSize recs[] = {{DiskLoc(0, 1148), 148},
@@ -512,7 +512,7 @@ TEST(CappedRecordStoreV1Scrambler, FourDeletedRecordsInSingleExtent) {
{DiskLoc(0, 1912), 24},
{DiskLoc(0, 1628), 84},
{}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
}
@@ -526,7 +526,7 @@ TEST(CappedRecordStoreV1Scrambler, FourDeletedRecordsInSingleExtent) {
class CollscanHelper {
public:
CollscanHelper(int nExtents)
- : md(new DummyRecordStoreV1MetaData(true, 0)), rs(&txn, &cb, ns(), md, &em, false) {
+ : md(new DummyRecordStoreV1MetaData(true, 0)), rs(&opCtx, &cb, ns(), md, &em, false) {
LocAndSize recs[] = {{}};
LocAndSize drecs[8];
ASSERT_LESS_THAN(nExtents, 8);
@@ -537,9 +537,9 @@ public:
drecs[nExtents].loc = DiskLoc();
drecs[nExtents].size = 0;
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid()); // unlooped
- initializeV1RS(&txn, recs, drecs, NULL, &em, md);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc().setInvalid()); // unlooped
+ initializeV1RS(&opCtx, recs, drecs, NULL, &em, md);
}
// Insert bypasses standard alloc/insert routines to use the extent we want.
@@ -551,7 +551,7 @@ public:
BSONObj o = b.done();
int len = o.objsize();
Extent* e = em.getExtent(ext);
- e = txn.recoveryUnit()->writing(e);
+ e = opCtx.recoveryUnit()->writing(e);
int ofs;
if (e->lastRecord.isNull()) {
ofs = ext.getOfs() + (e->_extentData - (char*)e);
@@ -560,7 +560,7 @@ public:
}
DiskLoc dl(ext.a(), ofs);
MmapV1RecordHeader* r = em.recordForV1(dl);
- r = (MmapV1RecordHeader*)txn.recoveryUnit()->writingPtr(
+ r = (MmapV1RecordHeader*)opCtx.recoveryUnit()->writingPtr(
r, MmapV1RecordHeader::HeaderSize + len);
r->lengthWithHeaders() = MmapV1RecordHeader::HeaderSize + len;
r->extentOfs() = e->myLoc.getOfs();
@@ -570,7 +570,7 @@ public:
if (e->firstRecord.isNull())
e->firstRecord = dl;
else
- txn.recoveryUnit()->writingInt(em.recordForV1(e->lastRecord)->nextOfs()) = ofs;
+ opCtx.recoveryUnit()->writingInt(em.recordForV1(e->lastRecord)->nextOfs()) = ofs;
e->lastRecord = dl;
return dl;
}
@@ -579,7 +579,7 @@ public:
void walkAndCount(int expectedCount) {
// Walk the collection going forward.
{
- CappedRecordStoreV1Iterator cursor(&txn, &rs, /*forward=*/true);
+ CappedRecordStoreV1Iterator cursor(&opCtx, &rs, /*forward=*/true);
int resultCount = 0;
while (auto record = cursor.next()) {
++resultCount;
@@ -590,7 +590,7 @@ public:
// Walk the collection going backwards.
{
- CappedRecordStoreV1Iterator cursor(&txn, &rs, /*forward=*/false);
+ CappedRecordStoreV1Iterator cursor(&opCtx, &rs, /*forward=*/false);
int resultCount = expectedCount;
while (auto record = cursor.next()) {
--resultCount;
@@ -604,7 +604,7 @@ public:
return "unittests.QueryStageCollectionScanCapped";
}
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyRecordStoreV1MetaData* md;
DummyExtentManager em;
@@ -621,27 +621,27 @@ TEST(CappedRecordStoreV1QueryStage, CollscanCappedBase) {
TEST(CappedRecordStoreV1QueryStage, CollscanEmptyLooped) {
CollscanHelper h(1);
- h.md->setCapFirstNewRecord(&h.txn, DiskLoc());
+ h.md->setCapFirstNewRecord(&h.opCtx, DiskLoc());
h.walkAndCount(0);
}
TEST(CappedRecordStoreV1QueryStage, CollscanEmptyMultiExtentLooped) {
CollscanHelper h(3);
- h.md->setCapFirstNewRecord(&h.txn, DiskLoc());
+ h.md->setCapFirstNewRecord(&h.opCtx, DiskLoc());
h.walkAndCount(0);
}
TEST(CappedRecordStoreV1QueryStage, CollscanSingle) {
CollscanHelper h(1);
- h.md->setCapFirstNewRecord(&h.txn, h.insert(h.md->capExtent(), 0));
+ h.md->setCapFirstNewRecord(&h.opCtx, h.insert(h.md->capExtent(), 0));
h.walkAndCount(1);
}
TEST(CappedRecordStoreV1QueryStage, CollscanNewCapFirst) {
CollscanHelper h(1);
DiskLoc x = h.insert(h.md->capExtent(), 0);
- h.md->setCapFirstNewRecord(&h.txn, x);
+ h.md->setCapFirstNewRecord(&h.opCtx, x);
h.insert(h.md->capExtent(), 1);
h.walkAndCount(2);
}
@@ -649,7 +649,7 @@ TEST(CappedRecordStoreV1QueryStage, CollscanNewCapFirst) {
TEST(CappedRecordStoreV1QueryStage, CollscanNewCapMiddle) {
CollscanHelper h(1);
h.insert(h.md->capExtent(), 0);
- h.md->setCapFirstNewRecord(&h.txn, h.insert(h.md->capExtent(), 1));
+ h.md->setCapFirstNewRecord(&h.opCtx, h.insert(h.md->capExtent(), 1));
h.insert(h.md->capExtent(), 2);
h.walkAndCount(3);
}
@@ -657,59 +657,59 @@ TEST(CappedRecordStoreV1QueryStage, CollscanNewCapMiddle) {
TEST(CappedRecordStoreV1QueryStage, CollscanFirstExtent) {
CollscanHelper h(2);
h.insert(h.md->capExtent(), 0);
- h.insert(h.md->lastExtent(&h.txn), 1);
- h.md->setCapFirstNewRecord(&h.txn, h.insert(h.md->capExtent(), 2));
+ h.insert(h.md->lastExtent(&h.opCtx), 1);
+ h.md->setCapFirstNewRecord(&h.opCtx, h.insert(h.md->capExtent(), 2));
h.insert(h.md->capExtent(), 3);
h.walkAndCount(4);
}
TEST(CappedRecordStoreV1QueryStage, CollscanLastExtent) {
CollscanHelper h(2);
- h.md->setCapExtent(&h.txn, h.md->lastExtent(&h.txn));
+ h.md->setCapExtent(&h.opCtx, h.md->lastExtent(&h.opCtx));
h.insert(h.md->capExtent(), 0);
- h.insert(h.md->firstExtent(&h.txn), 1);
- h.md->setCapFirstNewRecord(&h.txn, h.insert(h.md->capExtent(), 2));
+ h.insert(h.md->firstExtent(&h.opCtx), 1);
+ h.md->setCapFirstNewRecord(&h.opCtx, h.insert(h.md->capExtent(), 2));
h.insert(h.md->capExtent(), 3);
h.walkAndCount(4);
}
TEST(CappedRecordStoreV1QueryStage, CollscanMidExtent) {
CollscanHelper h(3);
- h.md->setCapExtent(&h.txn, h.em.getExtent(h.md->firstExtent(&h.txn))->xnext);
+ h.md->setCapExtent(&h.opCtx, h.em.getExtent(h.md->firstExtent(&h.opCtx))->xnext);
h.insert(h.md->capExtent(), 0);
- h.insert(h.md->lastExtent(&h.txn), 1);
- h.insert(h.md->firstExtent(&h.txn), 2);
- h.md->setCapFirstNewRecord(&h.txn, h.insert(h.md->capExtent(), 3));
+ h.insert(h.md->lastExtent(&h.opCtx), 1);
+ h.insert(h.md->firstExtent(&h.opCtx), 2);
+ h.md->setCapFirstNewRecord(&h.opCtx, h.insert(h.md->capExtent(), 3));
h.insert(h.md->capExtent(), 4);
h.walkAndCount(5);
}
TEST(CappedRecordStoreV1QueryStage, CollscanAloneInExtent) {
CollscanHelper h(3);
- h.md->setCapExtent(&h.txn, h.em.getExtent(h.md->firstExtent(&h.txn))->xnext);
- h.insert(h.md->lastExtent(&h.txn), 0);
- h.insert(h.md->firstExtent(&h.txn), 1);
- h.md->setCapFirstNewRecord(&h.txn, h.insert(h.md->capExtent(), 2));
+ h.md->setCapExtent(&h.opCtx, h.em.getExtent(h.md->firstExtent(&h.opCtx))->xnext);
+ h.insert(h.md->lastExtent(&h.opCtx), 0);
+ h.insert(h.md->firstExtent(&h.opCtx), 1);
+ h.md->setCapFirstNewRecord(&h.opCtx, h.insert(h.md->capExtent(), 2));
h.walkAndCount(3);
}
TEST(CappedRecordStoreV1QueryStage, CollscanFirstInExtent) {
CollscanHelper h(3);
- h.md->setCapExtent(&h.txn, h.em.getExtent(h.md->firstExtent(&h.txn))->xnext);
- h.insert(h.md->lastExtent(&h.txn), 0);
- h.insert(h.md->firstExtent(&h.txn), 1);
- h.md->setCapFirstNewRecord(&h.txn, h.insert(h.md->capExtent(), 2));
+ h.md->setCapExtent(&h.opCtx, h.em.getExtent(h.md->firstExtent(&h.opCtx))->xnext);
+ h.insert(h.md->lastExtent(&h.opCtx), 0);
+ h.insert(h.md->firstExtent(&h.opCtx), 1);
+ h.md->setCapFirstNewRecord(&h.opCtx, h.insert(h.md->capExtent(), 2));
h.insert(h.md->capExtent(), 3);
h.walkAndCount(4);
}
TEST(CappedRecordStoreV1QueryStage, CollscanLastInExtent) {
CollscanHelper h(3);
- h.md->setCapExtent(&h.txn, h.em.getExtent(h.md->firstExtent(&h.txn))->xnext);
+ h.md->setCapExtent(&h.opCtx, h.em.getExtent(h.md->firstExtent(&h.opCtx))->xnext);
h.insert(h.md->capExtent(), 0);
- h.insert(h.md->lastExtent(&h.txn), 1);
- h.insert(h.md->firstExtent(&h.txn), 2);
- h.md->setCapFirstNewRecord(&h.txn, h.insert(h.md->capExtent(), 3));
+ h.insert(h.md->lastExtent(&h.opCtx), 1);
+ h.insert(h.md->firstExtent(&h.opCtx), 2);
+ h.md->setCapFirstNewRecord(&h.opCtx, h.insert(h.md->capExtent(), 3));
h.walkAndCount(4);
}
}
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.cpp
index ac8f083eb82..872c29e112b 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.cpp
@@ -40,9 +40,9 @@ namespace mongo {
using std::endl;
-RecordStoreV1RepairCursor::RecordStoreV1RepairCursor(OperationContext* txn,
+RecordStoreV1RepairCursor::RecordStoreV1RepairCursor(OperationContext* opCtx,
const RecordStoreV1Base* recordStore)
- : _txn(txn), _recordStore(recordStore), _stage(FORWARD_SCAN) {
+ : _opCtx(opCtx), _recordStore(recordStore), _stage(FORWARD_SCAN) {
// Position the iterator at the first record
//
advance();
@@ -53,7 +53,7 @@ boost::optional<Record> RecordStoreV1RepairCursor::next() {
return {};
auto out = _currRecord.toRecordId();
advance();
- return {{out, _recordStore->dataFor(_txn, out)}};
+ return {{out, _recordStore->dataFor(_opCtx, out)}};
}
void RecordStoreV1RepairCursor::advance() {
@@ -76,10 +76,10 @@ void RecordStoreV1RepairCursor::advance() {
} else {
switch (_stage) {
case FORWARD_SCAN:
- _currRecord = _recordStore->getNextRecordInExtent(_txn, _currRecord);
+ _currRecord = _recordStore->getNextRecordInExtent(_opCtx, _currRecord);
break;
case BACKWARD_SCAN:
- _currRecord = _recordStore->getPrevRecordInExtent(_txn, _currRecord);
+ _currRecord = _recordStore->getPrevRecordInExtent(_opCtx, _currRecord);
break;
default:
invariant(!"This should never be reached.");
@@ -116,10 +116,10 @@ bool RecordStoreV1RepairCursor::_advanceToNextValidExtent() {
if (_currExtent.isNull()) {
switch (_stage) {
case FORWARD_SCAN:
- _currExtent = _recordStore->details()->firstExtent(_txn);
+ _currExtent = _recordStore->details()->firstExtent(_opCtx);
break;
case BACKWARD_SCAN:
- _currExtent = _recordStore->details()->lastExtent(_txn);
+ _currExtent = _recordStore->details()->lastExtent(_opCtx);
break;
default:
invariant(DONE == _stage);
@@ -181,7 +181,7 @@ bool RecordStoreV1RepairCursor::_advanceToNextValidExtent() {
return true;
}
-void RecordStoreV1RepairCursor::invalidate(OperationContext* txn, const RecordId& id) {
+void RecordStoreV1RepairCursor::invalidate(OperationContext* opCtx, const RecordId& id) {
// If we see this record again it probably means it was reinserted rather than an infinite
// loop. If we do loop, we should quickly hit another seen record that hasn't been
// invalidated.
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h b/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h
index b65782cd27b..d95683a7c42 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h
@@ -42,19 +42,19 @@ namespace mongo {
*/
class RecordStoreV1RepairCursor final : public RecordCursor {
public:
- RecordStoreV1RepairCursor(OperationContext* txn, const RecordStoreV1Base* recordStore);
+ RecordStoreV1RepairCursor(OperationContext* opCtx, const RecordStoreV1Base* recordStore);
boost::optional<Record> next() final;
- void invalidate(OperationContext* txn, const RecordId& dl);
+ void invalidate(OperationContext* opCtx, const RecordId& dl);
void save() final {}
bool restore() final {
return true;
}
void detachFromOperationContext() final {
- _txn = nullptr;
+ _opCtx = nullptr;
}
- void reattachToOperationContext(OperationContext* txn) final {
- _txn = txn;
+ void reattachToOperationContext(OperationContext* opCtx) final {
+ _opCtx = opCtx;
}
// Explicitly not supporting fetcherForNext(). The expected use case for this class is a
@@ -74,7 +74,7 @@ private:
bool _advanceToNextValidExtent();
// transactional context for read locks. Not owned by us
- OperationContext* _txn;
+ OperationContext* _opCtx;
// Reference to the owning RecordStore. The store must not be deleted while there are
// active iterators on it.
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
index dfe5860ce33..0b1e2f867c1 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
@@ -70,7 +70,7 @@ static ServerStatusMetricField<Counter64> dFreelist2("storage.freelist.search.bu
static ServerStatusMetricField<Counter64> dFreelist3("storage.freelist.search.scanned",
&freelistIterations);
-SimpleRecordStoreV1::SimpleRecordStoreV1(OperationContext* txn,
+SimpleRecordStoreV1::SimpleRecordStoreV1(OperationContext* opCtx,
StringData ns,
RecordStoreV1MetaData* details,
ExtentManager* em,
@@ -82,7 +82,7 @@ SimpleRecordStoreV1::SimpleRecordStoreV1(OperationContext* txn,
SimpleRecordStoreV1::~SimpleRecordStoreV1() {}
-DiskLoc SimpleRecordStoreV1::_allocFromExistingExtents(OperationContext* txn, int lenToAllocRaw) {
+DiskLoc SimpleRecordStoreV1::_allocFromExistingExtents(OperationContext* opCtx, int lenToAllocRaw) {
// Slowly drain the deletedListLegacyGrabBag by popping one record off and putting it in the
// correct deleted list each time we try to allocate a new record. This ensures we won't
// orphan any data when upgrading from old versions, without needing a long upgrade phase.
@@ -91,8 +91,8 @@ DiskLoc SimpleRecordStoreV1::_allocFromExistingExtents(OperationContext* txn, in
{
const DiskLoc head = _details->deletedListLegacyGrabBag();
if (!head.isNull()) {
- _details->setDeletedListLegacyGrabBag(txn, drec(head)->nextDeleted());
- addDeletedRec(txn, head);
+ _details->setDeletedListLegacyGrabBag(opCtx, drec(head)->nextDeleted());
+ addDeletedRec(opCtx, head);
}
}
@@ -122,8 +122,8 @@ DiskLoc SimpleRecordStoreV1::_allocFromExistingExtents(OperationContext* txn, in
return DiskLoc(); // no space
// Unlink ourself from the deleted list
- _details->setDeletedListEntry(txn, myBucket, dr->nextDeleted());
- *txn->recoveryUnit()->writing(&dr->nextDeleted()) = DiskLoc().setInvalid(); // defensive
+ _details->setDeletedListEntry(opCtx, myBucket, dr->nextDeleted());
+ *opCtx->recoveryUnit()->writing(&dr->nextDeleted()) = DiskLoc().setInvalid(); // defensive
}
invariant(dr->extentOfs() < loc.getOfs());
@@ -132,20 +132,20 @@ DiskLoc SimpleRecordStoreV1::_allocFromExistingExtents(OperationContext* txn, in
// allocation size. Otherwise, just take the whole DeletedRecord.
const int remainingLength = dr->lengthWithHeaders() - lenToAlloc;
if (remainingLength >= bucketSizes[0]) {
- txn->recoveryUnit()->writingInt(dr->lengthWithHeaders()) = lenToAlloc;
+ opCtx->recoveryUnit()->writingInt(dr->lengthWithHeaders()) = lenToAlloc;
const DiskLoc newDelLoc = DiskLoc(loc.a(), loc.getOfs() + lenToAlloc);
- DeletedRecord* newDel = txn->recoveryUnit()->writing(drec(newDelLoc));
+ DeletedRecord* newDel = opCtx->recoveryUnit()->writing(drec(newDelLoc));
newDel->extentOfs() = dr->extentOfs();
newDel->lengthWithHeaders() = remainingLength;
newDel->nextDeleted().Null();
- addDeletedRec(txn, newDelLoc);
+ addDeletedRec(opCtx, newDelLoc);
}
return loc;
}
-StatusWith<DiskLoc> SimpleRecordStoreV1::allocRecord(OperationContext* txn,
+StatusWith<DiskLoc> SimpleRecordStoreV1::allocRecord(OperationContext* opCtx,
int lengthWithHeaders,
bool enforceQuota) {
if (lengthWithHeaders > MaxAllowedAllocation) {
@@ -156,18 +156,18 @@ StatusWith<DiskLoc> SimpleRecordStoreV1::allocRecord(OperationContext* txn,
<< " > 16.5MB");
}
- DiskLoc loc = _allocFromExistingExtents(txn, lengthWithHeaders);
+ DiskLoc loc = _allocFromExistingExtents(opCtx, lengthWithHeaders);
if (!loc.isNull())
return StatusWith<DiskLoc>(loc);
LOG(1) << "allocating new extent";
increaseStorageSize(
- txn,
- _extentManager->followupSize(lengthWithHeaders, _details->lastExtentSize(txn)),
+ opCtx,
+ _extentManager->followupSize(lengthWithHeaders, _details->lastExtentSize(opCtx)),
enforceQuota);
- loc = _allocFromExistingExtents(txn, lengthWithHeaders);
+ loc = _allocFromExistingExtents(opCtx, lengthWithHeaders);
if (!loc.isNull()) {
// got on first try
return StatusWith<DiskLoc>(loc);
@@ -175,17 +175,17 @@ StatusWith<DiskLoc> SimpleRecordStoreV1::allocRecord(OperationContext* txn,
log() << "warning: alloc() failed after allocating new extent. "
<< "lengthWithHeaders: " << lengthWithHeaders
- << " last extent size:" << _details->lastExtentSize(txn) << "; trying again";
+ << " last extent size:" << _details->lastExtentSize(opCtx) << "; trying again";
- for (int z = 0; z < 10 && lengthWithHeaders > _details->lastExtentSize(txn); z++) {
+ for (int z = 0; z < 10 && lengthWithHeaders > _details->lastExtentSize(opCtx); z++) {
log() << "try #" << z << endl;
increaseStorageSize(
- txn,
- _extentManager->followupSize(lengthWithHeaders, _details->lastExtentSize(txn)),
+ opCtx,
+ _extentManager->followupSize(lengthWithHeaders, _details->lastExtentSize(opCtx)),
enforceQuota);
- loc = _allocFromExistingExtents(txn, lengthWithHeaders);
+ loc = _allocFromExistingExtents(opCtx, lengthWithHeaders);
if (!loc.isNull())
return StatusWith<DiskLoc>(loc);
}
@@ -193,8 +193,8 @@ StatusWith<DiskLoc> SimpleRecordStoreV1::allocRecord(OperationContext* txn,
return StatusWith<DiskLoc>(ErrorCodes::InternalError, "cannot allocate space");
}
-Status SimpleRecordStoreV1::truncate(OperationContext* txn) {
- const DiskLoc firstExtLoc = _details->firstExtent(txn);
+Status SimpleRecordStoreV1::truncate(OperationContext* opCtx) {
+ const DiskLoc firstExtLoc = _details->firstExtent(opCtx);
if (firstExtLoc.isNull() || !firstExtLoc.isValid()) {
// Already empty
return Status::OK();
@@ -204,53 +204,53 @@ Status SimpleRecordStoreV1::truncate(OperationContext* txn) {
Extent* firstExt = _extentManager->getExtent(firstExtLoc);
if (!firstExt->xnext.isNull()) {
const DiskLoc extNextLoc = firstExt->xnext;
- const DiskLoc oldLastExtLoc = _details->lastExtent(txn);
+ const DiskLoc oldLastExtLoc = _details->lastExtent(opCtx);
Extent* const nextExt = _extentManager->getExtent(extNextLoc);
// Unlink other extents;
- *txn->recoveryUnit()->writing(&nextExt->xprev) = DiskLoc();
- *txn->recoveryUnit()->writing(&firstExt->xnext) = DiskLoc();
- _details->setLastExtent(txn, firstExtLoc);
- _details->setLastExtentSize(txn, firstExt->length);
+ *opCtx->recoveryUnit()->writing(&nextExt->xprev) = DiskLoc();
+ *opCtx->recoveryUnit()->writing(&firstExt->xnext) = DiskLoc();
+ _details->setLastExtent(opCtx, firstExtLoc);
+ _details->setLastExtentSize(opCtx, firstExt->length);
- _extentManager->freeExtents(txn, extNextLoc, oldLastExtLoc);
+ _extentManager->freeExtents(opCtx, extNextLoc, oldLastExtLoc);
}
// Make the first (now only) extent a single large deleted record.
- *txn->recoveryUnit()->writing(&firstExt->firstRecord) = DiskLoc();
- *txn->recoveryUnit()->writing(&firstExt->lastRecord) = DiskLoc();
- _details->orphanDeletedList(txn);
- addDeletedRec(txn, _findFirstSpot(txn, firstExtLoc, firstExt));
+ *opCtx->recoveryUnit()->writing(&firstExt->firstRecord) = DiskLoc();
+ *opCtx->recoveryUnit()->writing(&firstExt->lastRecord) = DiskLoc();
+ _details->orphanDeletedList(opCtx);
+ addDeletedRec(opCtx, _findFirstSpot(opCtx, firstExtLoc, firstExt));
// Make stats reflect that there are now no documents in this record store.
- _details->setStats(txn, 0, 0);
+ _details->setStats(opCtx, 0, 0);
return Status::OK();
}
-void SimpleRecordStoreV1::addDeletedRec(OperationContext* txn, const DiskLoc& dloc) {
+void SimpleRecordStoreV1::addDeletedRec(OperationContext* opCtx, const DiskLoc& dloc) {
DeletedRecord* d = drec(dloc);
int b = bucket(d->lengthWithHeaders());
- *txn->recoveryUnit()->writing(&d->nextDeleted()) = _details->deletedListEntry(b);
- _details->setDeletedListEntry(txn, b, dloc);
+ *opCtx->recoveryUnit()->writing(&d->nextDeleted()) = _details->deletedListEntry(b);
+ _details->setDeletedListEntry(opCtx, b, dloc);
}
-std::unique_ptr<SeekableRecordCursor> SimpleRecordStoreV1::getCursor(OperationContext* txn,
+std::unique_ptr<SeekableRecordCursor> SimpleRecordStoreV1::getCursor(OperationContext* opCtx,
bool forward) const {
- return stdx::make_unique<SimpleRecordStoreV1Iterator>(txn, this, forward);
+ return stdx::make_unique<SimpleRecordStoreV1Iterator>(opCtx, this, forward);
}
vector<std::unique_ptr<RecordCursor>> SimpleRecordStoreV1::getManyCursors(
- OperationContext* txn) const {
+ OperationContext* opCtx) const {
vector<std::unique_ptr<RecordCursor>> cursors;
const Extent* ext;
- for (DiskLoc extLoc = details()->firstExtent(txn); !extLoc.isNull(); extLoc = ext->xnext) {
- ext = _getExtent(txn, extLoc);
+ for (DiskLoc extLoc = details()->firstExtent(opCtx); !extLoc.isNull(); extLoc = ext->xnext) {
+ ext = _getExtent(opCtx, extLoc);
if (ext->firstRecord.isNull())
continue;
- cursors.push_back(
- stdx::make_unique<RecordStoreV1Base::IntraExtentIterator>(txn, ext->firstRecord, this));
+ cursors.push_back(stdx::make_unique<RecordStoreV1Base::IntraExtentIterator>(
+ opCtx, ext->firstRecord, this));
}
return cursors;
@@ -284,7 +284,7 @@ private:
size_t _allocationSize;
};
-void SimpleRecordStoreV1::_compactExtent(OperationContext* txn,
+void SimpleRecordStoreV1::_compactExtent(OperationContext* opCtx,
const DiskLoc extentLoc,
int extentNumber,
RecordStoreCompactAdaptor* adaptor,
@@ -322,12 +322,12 @@ void SimpleRecordStoreV1::_compactExtent(OperationContext* txn,
long long nrecords = 0;
DiskLoc nextSourceLoc = sourceExtent->firstRecord;
while (!nextSourceLoc.isNull()) {
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
MmapV1RecordHeader* recOld = recordFor(nextSourceLoc);
RecordData oldData = recOld->toRecordData();
- nextSourceLoc = getNextRecordInExtent(txn, nextSourceLoc);
+ nextSourceLoc = getNextRecordInExtent(opCtx, nextSourceLoc);
if (compactOptions->validateDocuments && !adaptor->isDataValid(oldData)) {
// object is corrupt!
@@ -369,7 +369,7 @@ void SimpleRecordStoreV1::_compactExtent(OperationContext* txn,
// start of the compact, this insert will allocate a record in a new extent.
// See the comment in compact() for more details.
CompactDocWriter writer(recOld, rawDataSize, allocationSize);
- StatusWith<RecordId> status = insertRecordWithDocWriter(txn, &writer);
+ StatusWith<RecordId> status = insertRecordWithDocWriter(opCtx, &writer);
uassertStatusOK(status.getStatus());
const MmapV1RecordHeader* newRec =
recordFor(DiskLoc::fromRecordId(status.getValue()));
@@ -384,18 +384,18 @@ void SimpleRecordStoreV1::_compactExtent(OperationContext* txn,
// Remove the old record from the linked list of records withing the sourceExtent.
// The old record is not added to the freelist as we will be freeing the whole
// extent at the end.
- *txn->recoveryUnit()->writing(&sourceExtent->firstRecord) = nextSourceLoc;
+ *opCtx->recoveryUnit()->writing(&sourceExtent->firstRecord) = nextSourceLoc;
if (nextSourceLoc.isNull()) {
// Just moved the last record out of the extent. Mark extent as empty.
- *txn->recoveryUnit()->writing(&sourceExtent->lastRecord) = DiskLoc();
+ *opCtx->recoveryUnit()->writing(&sourceExtent->lastRecord) = DiskLoc();
} else {
MmapV1RecordHeader* newFirstRecord = recordFor(nextSourceLoc);
- txn->recoveryUnit()->writingInt(newFirstRecord->prevOfs()) = DiskLoc::NullOfs;
+ opCtx->recoveryUnit()->writingInt(newFirstRecord->prevOfs()) = DiskLoc::NullOfs;
}
// Adjust the stats to reflect the removal of the old record. The insert above
// handled adjusting the stats for the new record.
- _details->incrementStats(txn, -(recOld->netLength()), -1);
+ _details->incrementStats(opCtx, -(recOld->netLength()), -1);
wunit.commit();
}
@@ -405,16 +405,16 @@ void SimpleRecordStoreV1::_compactExtent(OperationContext* txn,
invariant(sourceExtent->lastRecord.isNull());
// We are still the first extent, but we must not be the only extent.
- invariant(_details->firstExtent(txn) == extentLoc);
- invariant(_details->lastExtent(txn) != extentLoc);
+ invariant(_details->firstExtent(opCtx) == extentLoc);
+ invariant(_details->lastExtent(opCtx) != extentLoc);
// Remove the newly emptied sourceExtent from the extent linked list and return it to
// the extent manager.
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
const DiskLoc newFirst = sourceExtent->xnext;
- _details->setFirstExtent(txn, newFirst);
- *txn->recoveryUnit()->writing(&_extentManager->getExtent(newFirst)->xprev) = DiskLoc();
- _extentManager->freeExtent(txn, extentLoc);
+ _details->setFirstExtent(opCtx, newFirst);
+ *opCtx->recoveryUnit()->writing(&_extentManager->getExtent(newFirst)->xprev) = DiskLoc();
+ _extentManager->freeExtent(opCtx, extentLoc);
wunit.commit();
{
@@ -428,53 +428,53 @@ void SimpleRecordStoreV1::_compactExtent(OperationContext* txn,
}
}
-Status SimpleRecordStoreV1::compact(OperationContext* txn,
+Status SimpleRecordStoreV1::compact(OperationContext* opCtx,
RecordStoreCompactAdaptor* adaptor,
const CompactOptions* options,
CompactStats* stats) {
std::vector<DiskLoc> extents;
- for (DiskLoc extLocation = _details->firstExtent(txn); !extLocation.isNull();
+ for (DiskLoc extLocation = _details->firstExtent(opCtx); !extLocation.isNull();
extLocation = _extentManager->getExtent(extLocation)->xnext) {
extents.push_back(extLocation);
}
log() << "compact " << extents.size() << " extents";
{
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
// Orphaning the deleted lists ensures that all inserts go to new extents rather than
// the ones that existed before starting the compact. If we abort the operation before
// completion, any free space in the old extents will be leaked and never reused unless
// the collection is compacted again or dropped. This is considered an acceptable
// failure mode as no data will be lost.
log() << "compact orphan deleted lists" << endl;
- _details->orphanDeletedList(txn);
+ _details->orphanDeletedList(opCtx);
// Start over from scratch with our extent sizing and growth
- _details->setLastExtentSize(txn, 0);
+ _details->setLastExtentSize(opCtx, 0);
// create a new extent so new records go there
- increaseStorageSize(txn, _details->lastExtentSize(txn), true);
+ increaseStorageSize(opCtx, _details->lastExtentSize(opCtx), true);
wunit.commit();
}
- stdx::unique_lock<Client> lk(*txn->getClient());
+ stdx::unique_lock<Client> lk(*opCtx->getClient());
ProgressMeterHolder pm(
- *txn->setMessage_inlock("compact extent", "Extent Compacting Progress", extents.size()));
+ *opCtx->setMessage_inlock("compact extent", "Extent Compacting Progress", extents.size()));
lk.unlock();
// Go through all old extents and move each record to a new set of extents.
int extentNumber = 0;
for (std::vector<DiskLoc>::iterator it = extents.begin(); it != extents.end(); it++) {
- txn->checkForInterrupt();
- invariant(_details->firstExtent(txn) == *it);
+ opCtx->checkForInterrupt();
+ invariant(_details->firstExtent(opCtx) == *it);
// empties and removes the first extent
- _compactExtent(txn, *it, extentNumber++, adaptor, options, stats);
- invariant(_details->firstExtent(txn) != *it);
+ _compactExtent(opCtx, *it, extentNumber++, adaptor, options, stats);
+ invariant(_details->firstExtent(opCtx) != *it);
pm.hit();
}
- invariant(_extentManager->getExtent(_details->firstExtent(txn))->xprev.isNull());
- invariant(_extentManager->getExtent(_details->lastExtent(txn))->xnext.isNull());
+ invariant(_extentManager->getExtent(_details->firstExtent(opCtx))->xprev.isNull());
+ invariant(_extentManager->getExtent(_details->lastExtent(opCtx))->xnext.isNull());
// indexes will do their own progress meter
pm.finished();
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.h b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.h
index 80fe4e8018b..61c04bbf420 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.h
@@ -41,7 +41,7 @@ class SimpleRecordStoreV1Cursor;
// used by index and original collections
class SimpleRecordStoreV1 : public RecordStoreV1Base {
public:
- SimpleRecordStoreV1(OperationContext* txn,
+ SimpleRecordStoreV1(OperationContext* opCtx,
StringData ns,
RecordStoreV1MetaData* details,
ExtentManager* em,
@@ -53,14 +53,14 @@ public:
return "SimpleRecordStoreV1";
}
- std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* txn,
+ std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* opCtx,
bool forward) const final;
- std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* txn) const final;
+ std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* opCtx) const final;
- virtual Status truncate(OperationContext* txn);
+ virtual Status truncate(OperationContext* opCtx);
- virtual void cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive) {
+ virtual void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) {
invariant(!"cappedTruncateAfter not supported");
}
@@ -70,7 +70,7 @@ public:
virtual bool compactsInPlace() const {
return false;
}
- virtual Status compact(OperationContext* txn,
+ virtual Status compact(OperationContext* opCtx,
RecordStoreCompactAdaptor* adaptor,
const CompactOptions* options,
CompactStats* stats);
@@ -83,16 +83,16 @@ protected:
return !_details->isUserFlagSet(CollectionOptions::Flag_NoPadding);
}
- virtual StatusWith<DiskLoc> allocRecord(OperationContext* txn,
+ virtual StatusWith<DiskLoc> allocRecord(OperationContext* opCtx,
int lengthWithHeaders,
bool enforceQuota);
- virtual void addDeletedRec(OperationContext* txn, const DiskLoc& dloc);
+ virtual void addDeletedRec(OperationContext* opCtx, const DiskLoc& dloc);
private:
- DiskLoc _allocFromExistingExtents(OperationContext* txn, int lengthWithHeaders);
+ DiskLoc _allocFromExistingExtents(OperationContext* opCtx, int lengthWithHeaders);
- void _compactExtent(OperationContext* txn,
+ void _compactExtent(OperationContext* opCtx,
const DiskLoc diskloc,
int extentNumber,
RecordStoreCompactAdaptor* adaptor,
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.cpp
index 81cd3456a07..414e1016a6b 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.cpp
@@ -39,18 +39,18 @@ namespace mongo {
// Regular / non-capped collection traversal
//
-SimpleRecordStoreV1Iterator::SimpleRecordStoreV1Iterator(OperationContext* txn,
+SimpleRecordStoreV1Iterator::SimpleRecordStoreV1Iterator(OperationContext* opCtx,
const SimpleRecordStoreV1* collection,
bool forward)
- : _txn(txn), _recordStore(collection), _forward(forward) {
+ : _opCtx(opCtx), _recordStore(collection), _forward(forward) {
// Eagerly seek to first Record on creation since it is cheap.
const ExtentManager* em = _recordStore->_extentManager;
- if (_recordStore->details()->firstExtent(txn).isNull()) {
+ if (_recordStore->details()->firstExtent(opCtx).isNull()) {
// nothing in the collection
- verify(_recordStore->details()->lastExtent(txn).isNull());
+ verify(_recordStore->details()->lastExtent(opCtx).isNull());
} else if (_forward) {
// Find a non-empty extent and start with the first record in it.
- Extent* e = em->getExtent(_recordStore->details()->firstExtent(txn));
+ Extent* e = em->getExtent(_recordStore->details()->firstExtent(opCtx));
while (e->firstRecord.isNull() && !e->xnext.isNull()) {
e = em->getExtent(e->xnext);
@@ -62,7 +62,7 @@ SimpleRecordStoreV1Iterator::SimpleRecordStoreV1Iterator(OperationContext* txn,
} else {
// Walk backwards, skipping empty extents, and use the last record in the first
// non-empty extent we see.
- Extent* e = em->getExtent(_recordStore->details()->lastExtent(txn));
+ Extent* e = em->getExtent(_recordStore->details()->lastExtent(opCtx));
// TODO ELABORATE
// Does one of e->lastRecord.isNull(), e.firstRecord.isNull() imply the other?
@@ -81,33 +81,33 @@ boost::optional<Record> SimpleRecordStoreV1Iterator::next() {
return {};
auto toReturn = _curr.toRecordId();
advance();
- return {{toReturn, _recordStore->RecordStore::dataFor(_txn, toReturn)}};
+ return {{toReturn, _recordStore->RecordStore::dataFor(_opCtx, toReturn)}};
}
boost::optional<Record> SimpleRecordStoreV1Iterator::seekExact(const RecordId& id) {
_curr = DiskLoc::fromRecordId(id);
advance();
- return {{id, _recordStore->RecordStore::dataFor(_txn, id)}};
+ return {{id, _recordStore->RecordStore::dataFor(_opCtx, id)}};
}
void SimpleRecordStoreV1Iterator::advance() {
// Move to the next thing.
if (!isEOF()) {
if (_forward) {
- _curr = _recordStore->getNextRecord(_txn, _curr);
+ _curr = _recordStore->getNextRecord(_opCtx, _curr);
} else {
- _curr = _recordStore->getPrevRecord(_txn, _curr);
+ _curr = _recordStore->getPrevRecord(_opCtx, _curr);
}
}
}
-void SimpleRecordStoreV1Iterator::invalidate(OperationContext* txn, const RecordId& dl) {
+void SimpleRecordStoreV1Iterator::invalidate(OperationContext* opCtx, const RecordId& dl) {
// Just move past the thing being deleted.
if (dl == _curr.toRecordId()) {
const DiskLoc origLoc = _curr;
// Undo the advance on rollback, as the deletion that forced it "never happened".
- txn->recoveryUnit()->onRollback([this, origLoc]() { this->_curr = origLoc; });
+ opCtx->recoveryUnit()->onRollback([this, origLoc]() { this->_curr = origLoc; });
advance();
}
}
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.h b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.h
index a480566f9d7..dd54877ee93 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.h
@@ -43,7 +43,7 @@ class SimpleRecordStoreV1;
*/
class SimpleRecordStoreV1Iterator final : public SeekableRecordCursor {
public:
- SimpleRecordStoreV1Iterator(OperationContext* txn,
+ SimpleRecordStoreV1Iterator(OperationContext* opCtx,
const SimpleRecordStoreV1* records,
bool forward);
@@ -52,12 +52,12 @@ public:
void save() final;
bool restore() final;
void detachFromOperationContext() final {
- _txn = nullptr;
+ _opCtx = nullptr;
}
- void reattachToOperationContext(OperationContext* txn) final {
- _txn = txn;
+ void reattachToOperationContext(OperationContext* opCtx) final {
+ _opCtx = opCtx;
}
- void invalidate(OperationContext* txn, const RecordId& dl) final;
+ void invalidate(OperationContext* opCtx, const RecordId& dl) final;
std::unique_ptr<RecordFetcher> fetcherForNext() const final;
std::unique_ptr<RecordFetcher> fetcherForId(const RecordId& id) const final;
@@ -68,7 +68,7 @@ private:
}
// for getNext, not owned
- OperationContext* _txn;
+ OperationContext* _opCtx;
// The result returned on the next call to getNext().
DiskLoc _curr;
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp
index 573d4975fbf..e49ac7c1301 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp
@@ -115,128 +115,134 @@ private:
/** alloc() quantizes the requested size using quantizeAllocationSpace() rules. */
TEST(SimpleRecordStoreV1, AllocQuantized) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
string myns = "test.AllocQuantized";
- SimpleRecordStoreV1 rs(&txn, myns, md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, myns, md, &em, false);
BSONObj obj = docForRecordSize(300);
- StatusWith<RecordId> result = rs.insertRecord(&txn, obj.objdata(), obj.objsize(), false);
+ StatusWith<RecordId> result = rs.insertRecord(&opCtx, obj.objdata(), obj.objsize(), false);
ASSERT(result.isOK());
// The length of the allocated record is quantized.
- ASSERT_EQUALS(512, rs.dataFor(&txn, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
+ ASSERT_EQUALS(512,
+ rs.dataFor(&opCtx, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
}
TEST(SimpleRecordStoreV1, AllocNonQuantized) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- md->setUserFlag(&txn, CollectionOptions::Flag_NoPadding);
+ md->setUserFlag(&opCtx, CollectionOptions::Flag_NoPadding);
string myns = "test.AllocQuantized";
- SimpleRecordStoreV1 rs(&txn, myns, md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, myns, md, &em, false);
BSONObj obj = docForRecordSize(300);
- StatusWith<RecordId> result = rs.insertRecord(&txn, obj.objdata(), obj.objsize(), false);
+ StatusWith<RecordId> result = rs.insertRecord(&opCtx, obj.objdata(), obj.objsize(), false);
ASSERT(result.isOK());
// The length of the allocated record is quantized.
- ASSERT_EQUALS(300, rs.dataFor(&txn, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
+ ASSERT_EQUALS(300,
+ rs.dataFor(&opCtx, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
}
TEST(SimpleRecordStoreV1, AllocNonQuantizedStillAligned) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- md->setUserFlag(&txn, CollectionOptions::Flag_NoPadding);
+ md->setUserFlag(&opCtx, CollectionOptions::Flag_NoPadding);
string myns = "test.AllocQuantized";
- SimpleRecordStoreV1 rs(&txn, myns, md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, myns, md, &em, false);
BSONObj obj = docForRecordSize(298);
- StatusWith<RecordId> result = rs.insertRecord(&txn, obj.objdata(), obj.objsize(), false);
+ StatusWith<RecordId> result = rs.insertRecord(&opCtx, obj.objdata(), obj.objsize(), false);
ASSERT(result.isOK());
// The length of the allocated record is quantized.
- ASSERT_EQUALS(300, rs.dataFor(&txn, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
+ ASSERT_EQUALS(300,
+ rs.dataFor(&opCtx, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
}
/** alloc() quantizes the requested size if DocWriter::addPadding() returns true. */
TEST(SimpleRecordStoreV1, AllocQuantizedWithDocWriter) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
string myns = "test.AllocQuantized";
- SimpleRecordStoreV1 rs(&txn, myns, md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, myns, md, &em, false);
BsonDocWriter docWriter(docForRecordSize(300), true);
- StatusWith<RecordId> result = rs.insertRecordWithDocWriter(&txn, &docWriter);
+ StatusWith<RecordId> result = rs.insertRecordWithDocWriter(&opCtx, &docWriter);
ASSERT(result.isOK());
// The length of the allocated record is quantized.
- ASSERT_EQUALS(512, rs.dataFor(&txn, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
+ ASSERT_EQUALS(512,
+ rs.dataFor(&opCtx, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
}
/**
* alloc() does not quantize records if DocWriter::addPadding() returns false
*/
TEST(SimpleRecordStoreV1, AllocNonQuantizedDocWriter) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
string myns = "test.AllocIndexNamespaceNotQuantized";
- SimpleRecordStoreV1 rs(&txn, myns + "$x", md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, myns + "$x", md, &em, false);
BsonDocWriter docWriter(docForRecordSize(300), false);
- StatusWith<RecordId> result = rs.insertRecordWithDocWriter(&txn, &docWriter);
+ StatusWith<RecordId> result = rs.insertRecordWithDocWriter(&opCtx, &docWriter);
ASSERT(result.isOK());
// The length of the allocated record is not quantized.
- ASSERT_EQUALS(300, rs.dataFor(&txn, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
+ ASSERT_EQUALS(300,
+ rs.dataFor(&opCtx, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
}
/** alloc() aligns record sizes up to 4 bytes even if DocWriter::addPadding returns false. */
TEST(SimpleRecordStoreV1, AllocAlignedDocWriter) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
string myns = "test.AllocIndexNamespaceNotQuantized";
- SimpleRecordStoreV1 rs(&txn, myns + "$x", md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, myns + "$x", md, &em, false);
BsonDocWriter docWriter(docForRecordSize(298), false);
- StatusWith<RecordId> result = rs.insertRecordWithDocWriter(&txn, &docWriter);
+ StatusWith<RecordId> result = rs.insertRecordWithDocWriter(&opCtx, &docWriter);
ASSERT(result.isOK());
- ASSERT_EQUALS(300, rs.dataFor(&txn, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
+ ASSERT_EQUALS(300,
+ rs.dataFor(&opCtx, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
}
/**
* alloc() with quantized size doesn't split if enough room left over.
*/
TEST(SimpleRecordStoreV1, AllocUseQuantizedDeletedRecordWithoutSplit) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, "test.foo", md, &em, false);
{
LocAndSize drecs[] = {{DiskLoc(0, 1000), 512 + 31}, {}};
- initializeV1RS(&txn, NULL, drecs, NULL, &em, md);
+ initializeV1RS(&opCtx, NULL, drecs, NULL, &em, md);
}
BsonDocWriter docWriter(docForRecordSize(300), true);
- StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&txn, &docWriter);
+ StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&opCtx, &docWriter);
ASSERT_OK(actualLocation.getStatus());
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 512 + 31}, {}};
LocAndSize drecs[] = {{}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
}
}
@@ -244,24 +250,24 @@ TEST(SimpleRecordStoreV1, AllocUseQuantizedDeletedRecordWithoutSplit) {
* alloc() with quantized size splits if enough room left over.
*/
TEST(SimpleRecordStoreV1, AllocUseQuantizedDeletedRecordWithSplit) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, "test.foo", md, &em, false);
{
LocAndSize drecs[] = {{DiskLoc(0, 1000), 512 + 32}, {}};
- initializeV1RS(&txn, NULL, drecs, NULL, &em, md);
+ initializeV1RS(&opCtx, NULL, drecs, NULL, &em, md);
}
BsonDocWriter docWriter(docForRecordSize(300), true);
- StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&txn, &docWriter);
+ StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&opCtx, &docWriter);
ASSERT_OK(actualLocation.getStatus());
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 512}, {}};
LocAndSize drecs[] = {{DiskLoc(0, 1512), 32}, {}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
}
}
@@ -269,24 +275,24 @@ TEST(SimpleRecordStoreV1, AllocUseQuantizedDeletedRecordWithSplit) {
* alloc() with non quantized size doesn't split if enough room left over.
*/
TEST(SimpleRecordStoreV1, AllocUseNonQuantizedDeletedRecordWithoutSplit) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, "test.foo", md, &em, false);
{
LocAndSize drecs[] = {{DiskLoc(0, 1000), 331}, {}};
- initializeV1RS(&txn, NULL, drecs, NULL, &em, md);
+ initializeV1RS(&opCtx, NULL, drecs, NULL, &em, md);
}
BsonDocWriter docWriter(docForRecordSize(300), false);
- StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&txn, &docWriter);
+ StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&opCtx, &docWriter);
ASSERT_OK(actualLocation.getStatus());
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 331}, {}};
LocAndSize drecs[] = {{}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
}
}
@@ -294,24 +300,24 @@ TEST(SimpleRecordStoreV1, AllocUseNonQuantizedDeletedRecordWithoutSplit) {
* alloc() with non quantized size splits if enough room left over.
*/
TEST(SimpleRecordStoreV1, AllocUseNonQuantizedDeletedRecordWithSplit) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, "test.foo", md, &em, false);
{
LocAndSize drecs[] = {{DiskLoc(0, 1000), 332}, {}};
- initializeV1RS(&txn, NULL, drecs, NULL, &em, md);
+ initializeV1RS(&opCtx, NULL, drecs, NULL, &em, md);
}
BsonDocWriter docWriter(docForRecordSize(300), false);
- StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&txn, &docWriter);
+ StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&opCtx, &docWriter);
ASSERT_OK(actualLocation.getStatus());
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 300}, {}};
LocAndSize drecs[] = {{DiskLoc(0, 1300), 32}, {}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
}
}
@@ -319,27 +325,27 @@ TEST(SimpleRecordStoreV1, AllocUseNonQuantizedDeletedRecordWithSplit) {
* alloc() will use from the legacy grab bag if it can.
*/
TEST(SimpleRecordStoreV1, GrabBagIsUsed) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, "test.foo", md, &em, false);
{
LocAndSize drecs[] = {{}};
LocAndSize grabBag[] = {
{DiskLoc(0, 1000), 4 * 1024 * 1024}, {DiskLoc(1, 1000), 4 * 1024 * 1024}, {}};
- initializeV1RS(&txn, NULL, drecs, grabBag, &em, md);
+ initializeV1RS(&opCtx, NULL, drecs, grabBag, &em, md);
}
BsonDocWriter docWriter(docForRecordSize(256), false);
- StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&txn, &docWriter);
+ StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&opCtx, &docWriter);
ASSERT_OK(actualLocation.getStatus());
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 256}, {}};
LocAndSize drecs[] = {{DiskLoc(0, 1256), 4 * 1024 * 1024 - 256}, {}};
LocAndSize grabBag[] = {{DiskLoc(1, 1000), 4 * 1024 * 1024}, {}};
- assertStateV1RS(&txn, recs, drecs, grabBag, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, grabBag, &em, md);
}
}
@@ -347,27 +353,27 @@ TEST(SimpleRecordStoreV1, GrabBagIsUsed) {
* alloc() will pull from the legacy grab bag even if it isn't needed.
*/
TEST(SimpleRecordStoreV1, GrabBagIsPoppedEvenIfUnneeded) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, "test.foo", md, &em, false);
{
LocAndSize drecs[] = {{DiskLoc(0, 1000), 1000}, {}};
LocAndSize grabBag[] = {
{DiskLoc(1, 1000), 4 * 1024 * 1024}, {DiskLoc(2, 1000), 4 * 1024 * 1024}, {}};
- initializeV1RS(&txn, NULL, drecs, grabBag, &em, md);
+ initializeV1RS(&opCtx, NULL, drecs, grabBag, &em, md);
}
BsonDocWriter docWriter(docForRecordSize(1000), false);
- StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&txn, &docWriter);
+ StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&opCtx, &docWriter);
ASSERT_OK(actualLocation.getStatus());
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 1000}, {}};
LocAndSize drecs[] = {{DiskLoc(1, 1000), 4 * 1024 * 1024}, {}};
LocAndSize grabBag[] = {{DiskLoc(2, 1000), 4 * 1024 * 1024}, {}};
- assertStateV1RS(&txn, recs, drecs, grabBag, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, grabBag, &em, md);
}
}
@@ -375,54 +381,54 @@ TEST(SimpleRecordStoreV1, GrabBagIsPoppedEvenIfUnneeded) {
* alloc() will pull from the legacy grab bag even if it can't be used
*/
TEST(SimpleRecordStoreV1, GrabBagIsPoppedEvenIfUnusable) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, "test.foo", md, &em, false);
{
LocAndSize drecs[] = {{DiskLoc(0, 1000), 8 * 1024 * 1024}, {}};
LocAndSize grabBag[] = {
{DiskLoc(1, 1000), 4 * 1024 * 1024}, {DiskLoc(2, 1000), 4 * 1024 * 1024}, {}};
- initializeV1RS(&txn, NULL, drecs, grabBag, &em, md);
+ initializeV1RS(&opCtx, NULL, drecs, grabBag, &em, md);
}
BsonDocWriter docWriter(docForRecordSize(8 * 1024 * 1024), false);
- StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&txn, &docWriter);
+ StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&opCtx, &docWriter);
ASSERT_OK(actualLocation.getStatus());
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 8 * 1024 * 1024}, {}};
LocAndSize drecs[] = {{DiskLoc(1, 1000), 4 * 1024 * 1024}, {}};
LocAndSize grabBag[] = {{DiskLoc(2, 1000), 4 * 1024 * 1024}, {}};
- assertStateV1RS(&txn, recs, drecs, grabBag, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, grabBag, &em, md);
}
}
// -----------------
TEST(SimpleRecordStoreV1, FullSimple1) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, "test.foo", md, &em, false);
ASSERT_EQUALS(0, md->numRecords());
- StatusWith<RecordId> result = rs.insertRecord(&txn, "abc", 4, 1000);
+ StatusWith<RecordId> result = rs.insertRecord(&opCtx, "abc", 4, 1000);
ASSERT_TRUE(result.isOK());
ASSERT_EQUALS(1, md->numRecords());
- RecordData recordData = rs.dataFor(&txn, result.getValue());
+ RecordData recordData = rs.dataFor(&opCtx, result.getValue());
ASSERT_EQUALS(string("abc"), string(recordData.data()));
}
// -----------------
TEST(SimpleRecordStoreV1, Truncate) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, "test.foo", md, &em, false);
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 100},
@@ -433,12 +439,12 @@ TEST(SimpleRecordStoreV1, Truncate) {
LocAndSize drecs[] = {
{DiskLoc(0, 1200), 100}, {DiskLoc(2, 1000), 100}, {DiskLoc(1, 1000), 1000}, {}};
- initializeV1RS(&txn, recs, drecs, NULL, &em, md);
+ initializeV1RS(&opCtx, recs, drecs, NULL, &em, md);
ASSERT_EQUALS(em.getExtent(DiskLoc(0, 0))->length, em.minSize());
}
- rs.truncate(&txn);
+ rs.truncate(&opCtx);
{
LocAndSize recs[] = {{}};
@@ -446,7 +452,7 @@ TEST(SimpleRecordStoreV1, Truncate) {
// One extent filled with a single deleted record.
{DiskLoc(0, Extent::HeaderSize()), em.minSize() - Extent::HeaderSize()},
{}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
}
}
}
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp
index 6f4d3993cbe..3872e4bccd0 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp
@@ -68,7 +68,7 @@ const DiskLoc& DummyRecordStoreV1MetaData::capExtent() const {
return _capExtent;
}
-void DummyRecordStoreV1MetaData::setCapExtent(OperationContext* txn, const DiskLoc& loc) {
+void DummyRecordStoreV1MetaData::setCapExtent(OperationContext* opCtx, const DiskLoc& loc) {
_capExtent = loc;
}
@@ -76,7 +76,7 @@ const DiskLoc& DummyRecordStoreV1MetaData::capFirstNewRecord() const {
return _capFirstNewRecord;
}
-void DummyRecordStoreV1MetaData::setCapFirstNewRecord(OperationContext* txn, const DiskLoc& loc) {
+void DummyRecordStoreV1MetaData::setCapFirstNewRecord(OperationContext* opCtx, const DiskLoc& loc) {
_capFirstNewRecord = loc;
}
@@ -88,14 +88,14 @@ long long DummyRecordStoreV1MetaData::numRecords() const {
return _numRecords;
}
-void DummyRecordStoreV1MetaData::incrementStats(OperationContext* txn,
+void DummyRecordStoreV1MetaData::incrementStats(OperationContext* opCtx,
long long dataSizeIncrement,
long long numRecordsIncrement) {
_dataSize += dataSizeIncrement;
_numRecords += numRecordsIncrement;
}
-void DummyRecordStoreV1MetaData::setStats(OperationContext* txn,
+void DummyRecordStoreV1MetaData::setStats(OperationContext* opCtx,
long long dataSize,
long long numRecords) {
_dataSize = dataSize;
@@ -113,7 +113,7 @@ DiskLoc DummyRecordStoreV1MetaData::deletedListEntry(int bucket) const {
return _deletedLists[bucket];
}
-void DummyRecordStoreV1MetaData::setDeletedListEntry(OperationContext* txn,
+void DummyRecordStoreV1MetaData::setDeletedListEntry(OperationContext* opCtx,
int bucket,
const DiskLoc& loc) {
invariant(bucket >= 0);
@@ -127,29 +127,29 @@ DiskLoc DummyRecordStoreV1MetaData::deletedListLegacyGrabBag() const {
return _deletedListLegacyGrabBag;
}
-void DummyRecordStoreV1MetaData::setDeletedListLegacyGrabBag(OperationContext* txn,
+void DummyRecordStoreV1MetaData::setDeletedListLegacyGrabBag(OperationContext* opCtx,
const DiskLoc& loc) {
_deletedListLegacyGrabBag = loc;
}
-void DummyRecordStoreV1MetaData::orphanDeletedList(OperationContext* txn) {
+void DummyRecordStoreV1MetaData::orphanDeletedList(OperationContext* opCtx) {
// They will be recreated on demand.
_deletedLists.clear();
}
-const DiskLoc& DummyRecordStoreV1MetaData::firstExtent(OperationContext* txn) const {
+const DiskLoc& DummyRecordStoreV1MetaData::firstExtent(OperationContext* opCtx) const {
return _firstExtent;
}
-void DummyRecordStoreV1MetaData::setFirstExtent(OperationContext* txn, const DiskLoc& loc) {
+void DummyRecordStoreV1MetaData::setFirstExtent(OperationContext* opCtx, const DiskLoc& loc) {
_firstExtent = loc;
}
-const DiskLoc& DummyRecordStoreV1MetaData::lastExtent(OperationContext* txn) const {
+const DiskLoc& DummyRecordStoreV1MetaData::lastExtent(OperationContext* opCtx) const {
return _lastExtent;
}
-void DummyRecordStoreV1MetaData::setLastExtent(OperationContext* txn, const DiskLoc& loc) {
+void DummyRecordStoreV1MetaData::setLastExtent(OperationContext* opCtx, const DiskLoc& loc) {
_lastExtent = loc;
}
@@ -161,21 +161,21 @@ bool DummyRecordStoreV1MetaData::isUserFlagSet(int flag) const {
return _userFlags & flag;
}
-bool DummyRecordStoreV1MetaData::setUserFlag(OperationContext* txn, int flag) {
+bool DummyRecordStoreV1MetaData::setUserFlag(OperationContext* opCtx, int flag) {
if ((_userFlags & flag) == flag)
return false;
_userFlags |= flag;
return true;
}
-bool DummyRecordStoreV1MetaData::clearUserFlag(OperationContext* txn, int flag) {
+bool DummyRecordStoreV1MetaData::clearUserFlag(OperationContext* opCtx, int flag) {
if ((_userFlags & flag) == 0)
return false;
_userFlags &= ~flag;
return true;
}
-bool DummyRecordStoreV1MetaData::replaceUserFlags(OperationContext* txn, int flags) {
+bool DummyRecordStoreV1MetaData::replaceUserFlags(OperationContext* opCtx, int flags) {
if (_userFlags == flags)
return false;
_userFlags = flags;
@@ -183,11 +183,11 @@ bool DummyRecordStoreV1MetaData::replaceUserFlags(OperationContext* txn, int fla
}
-int DummyRecordStoreV1MetaData::lastExtentSize(OperationContext* txn) const {
+int DummyRecordStoreV1MetaData::lastExtentSize(OperationContext* opCtx) const {
return _lastExtentSize;
}
-void DummyRecordStoreV1MetaData::setLastExtentSize(OperationContext* txn, int newMax) {
+void DummyRecordStoreV1MetaData::setLastExtentSize(OperationContext* opCtx, int newMax) {
_lastExtentSize = newMax;
}
@@ -204,9 +204,9 @@ DummyExtentManager::~DummyExtentManager() {
}
}
-void DummyExtentManager::close(OperationContext* txn) {}
+void DummyExtentManager::close(OperationContext* opCtx) {}
-Status DummyExtentManager::init(OperationContext* txn) {
+Status DummyExtentManager::init(OperationContext* opCtx) {
return Status::OK();
}
@@ -219,7 +219,7 @@ long long DummyExtentManager::fileSize() const {
return -1;
}
-DiskLoc DummyExtentManager::allocateExtent(OperationContext* txn,
+DiskLoc DummyExtentManager::allocateExtent(OperationContext* opCtx,
bool capped,
int size,
bool enforceQuota) {
@@ -244,14 +244,14 @@ DiskLoc DummyExtentManager::allocateExtent(OperationContext* txn,
return loc;
}
-void DummyExtentManager::freeExtents(OperationContext* txn, DiskLoc firstExt, DiskLoc lastExt) {
+void DummyExtentManager::freeExtents(OperationContext* opCtx, DiskLoc firstExt, DiskLoc lastExt) {
// XXX
}
-void DummyExtentManager::freeExtent(OperationContext* txn, DiskLoc extent) {
+void DummyExtentManager::freeExtent(OperationContext* opCtx, DiskLoc extent) {
// XXX
}
-void DummyExtentManager::freeListStats(OperationContext* txn,
+void DummyExtentManager::freeListStats(OperationContext* opCtx,
int* numExtents,
int64_t* totalFreeSizeBytes) const {
invariant(false);
@@ -297,11 +297,11 @@ DummyExtentManager::CacheHint* DummyExtentManager::cacheHint(const DiskLoc& exte
return new CacheHint();
}
-DataFileVersion DummyExtentManager::getFileFormat(OperationContext* txn) const {
+DataFileVersion DummyExtentManager::getFileFormat(OperationContext* opCtx) const {
return DataFileVersion::defaultForNewFiles();
}
-void DummyExtentManager::setFileFormat(OperationContext* txn, DataFileVersion newVersion) {}
+void DummyExtentManager::setFileFormat(OperationContext* opCtx, DataFileVersion newVersion) {}
const DataFile* DummyExtentManager::getOpenFile(int n) const {
return nullptr;
@@ -324,9 +324,11 @@ void accumulateExtentSizeRequirements(const LocAndSize* las, std::map<int, size_
}
}
-void printRecList(OperationContext* txn, const ExtentManager* em, const RecordStoreV1MetaData* md) {
+void printRecList(OperationContext* opCtx,
+ const ExtentManager* em,
+ const RecordStoreV1MetaData* md) {
log() << " *** BEGIN ACTUAL RECORD LIST *** ";
- DiskLoc extLoc = md->firstExtent(txn);
+ DiskLoc extLoc = md->firstExtent(opCtx);
std::set<DiskLoc> seenLocs;
while (!extLoc.isNull()) {
Extent* ext = em->getExtent(extLoc, true);
@@ -380,7 +382,7 @@ void printDRecList(const ExtentManager* em, const RecordStoreV1MetaData* md) {
}
}
-void initializeV1RS(OperationContext* txn,
+void initializeV1RS(OperationContext* opCtx,
const LocAndSize* records,
const LocAndSize* drecs,
const LocAndSize* legacyGrabBag,
@@ -390,7 +392,7 @@ void initializeV1RS(OperationContext* txn,
// Need to start with a blank slate
invariant(em->numFiles() == 0);
- invariant(md->firstExtent(txn).isNull());
+ invariant(md->firstExtent(opCtx).isNull());
// pre-allocate extents (even extents that aren't part of this RS)
{
@@ -404,7 +406,7 @@ void initializeV1RS(OperationContext* txn,
const int maxExtent = extentSizes.rbegin()->first;
for (int i = 0; i <= maxExtent; i++) {
const size_t size = extentSizes.count(i) ? extentSizes[i] : 0;
- const DiskLoc loc = em->allocateExtent(txn, md->isCapped(), size, 0);
+ const DiskLoc loc = em->allocateExtent(opCtx, md->isCapped(), size, 0);
// This function and assertState depend on these details of DummyExtentManager
invariant(loc.a() == i);
@@ -412,8 +414,8 @@ void initializeV1RS(OperationContext* txn,
}
// link together extents that should be part of this RS
- md->setFirstExtent(txn, DiskLoc(extentSizes.begin()->first, 0));
- md->setLastExtent(txn, DiskLoc(extentSizes.rbegin()->first, 0));
+ md->setFirstExtent(opCtx, DiskLoc(extentSizes.begin()->first, 0));
+ md->setLastExtent(opCtx, DiskLoc(extentSizes.rbegin()->first, 0));
for (ExtentSizes::iterator it = extentSizes.begin(); boost::next(it) != extentSizes.end();
/* ++it */) {
const int a = it->first;
@@ -425,12 +427,12 @@ void initializeV1RS(OperationContext* txn,
// This signals "done allocating new extents".
if (md->isCapped())
- md->setDeletedListEntry(txn, 1, DiskLoc());
+ md->setDeletedListEntry(opCtx, 1, DiskLoc());
}
if (records && !records[0].loc.isNull()) {
int recIdx = 0;
- DiskLoc extLoc = md->firstExtent(txn);
+ DiskLoc extLoc = md->firstExtent(opCtx);
while (!extLoc.isNull()) {
Extent* ext = em->getExtent(extLoc);
int prevOfs = DiskLoc::NullOfs;
@@ -440,7 +442,7 @@ void initializeV1RS(OperationContext* txn,
;
invariant(size >= MmapV1RecordHeader::HeaderSize);
- md->incrementStats(txn, size - MmapV1RecordHeader::HeaderSize, 1);
+ md->incrementStats(opCtx, size - MmapV1RecordHeader::HeaderSize, 1);
if (ext->firstRecord.isNull())
ext->firstRecord = loc;
@@ -480,7 +482,7 @@ void initializeV1RS(OperationContext* txn,
if (md->isCapped()) {
// All drecs form a single list in bucket 0
if (prevNextPtr == NULL) {
- md->setDeletedListEntry(txn, 0, loc);
+ md->setDeletedListEntry(opCtx, 0, loc);
} else {
*prevNextPtr = loc;
}
@@ -488,11 +490,11 @@ void initializeV1RS(OperationContext* txn,
if (loc.a() < md->capExtent().a() &&
drecs[drecIdx + 1].loc.a() == md->capExtent().a()) {
// Bucket 1 is known as cappedLastDelRecLastExtent
- md->setDeletedListEntry(txn, 1, loc);
+ md->setDeletedListEntry(opCtx, 1, loc);
}
} else if (bucket != lastBucket) {
invariant(bucket > lastBucket); // if this fails, drecs weren't sorted by bucket
- md->setDeletedListEntry(txn, bucket, loc);
+ md->setDeletedListEntry(opCtx, bucket, loc);
lastBucket = bucket;
} else {
*prevNextPtr = loc;
@@ -519,7 +521,7 @@ void initializeV1RS(OperationContext* txn,
invariant(size >= MmapV1RecordHeader::HeaderSize);
if (grabBagIdx == 0) {
- md->setDeletedListLegacyGrabBag(txn, loc);
+ md->setDeletedListLegacyGrabBag(opCtx, loc);
} else {
*prevNextPtr = loc;
}
@@ -535,10 +537,10 @@ void initializeV1RS(OperationContext* txn,
}
// Make sure we set everything up as requested.
- assertStateV1RS(txn, records, drecs, legacyGrabBag, em, md);
+ assertStateV1RS(opCtx, records, drecs, legacyGrabBag, em, md);
}
-void assertStateV1RS(OperationContext* txn,
+void assertStateV1RS(OperationContext* opCtx,
const LocAndSize* records,
const LocAndSize* drecs,
const LocAndSize* legacyGrabBag,
@@ -553,7 +555,7 @@ void assertStateV1RS(OperationContext* txn,
int recIdx = 0;
- DiskLoc extLoc = md->firstExtent(txn);
+ DiskLoc extLoc = md->firstExtent(opCtx);
while (!extLoc.isNull()) { // for each Extent
Extent* ext = em->getExtent(extLoc, true);
int expectedPrevOfs = DiskLoc::NullOfs;
@@ -579,7 +581,7 @@ void assertStateV1RS(OperationContext* txn,
}
if (ext->xnext.isNull()) {
- ASSERT_EQUALS(md->lastExtent(txn), extLoc);
+ ASSERT_EQUALS(md->lastExtent(opCtx), extLoc);
}
extLoc = ext->xnext;
@@ -602,7 +604,7 @@ void assertStateV1RS(OperationContext* txn,
// the first drec in the capExtent. If the capExtent is the first Extent,
// it should be Null.
- if (md->capExtent() == md->firstExtent(txn)) {
+ if (md->capExtent() == md->firstExtent(opCtx)) {
ASSERT_EQUALS(actualLoc, DiskLoc());
} else {
ASSERT_NOT_EQUALS(actualLoc.a(), md->capExtent().a());
@@ -659,7 +661,7 @@ void assertStateV1RS(OperationContext* txn,
}
} catch (...) {
// If a test fails, provide extra info to make debugging easier
- printRecList(txn, em, md);
+ printRecList(opCtx, em, md);
printDRecList(em, md);
throw;
}
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h
index eac135dd24a..c9af1e5cc36 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h
@@ -44,33 +44,33 @@ public:
virtual ~DummyRecordStoreV1MetaData() {}
virtual const DiskLoc& capExtent() const;
- virtual void setCapExtent(OperationContext* txn, const DiskLoc& loc);
+ virtual void setCapExtent(OperationContext* opCtx, const DiskLoc& loc);
virtual const DiskLoc& capFirstNewRecord() const;
- virtual void setCapFirstNewRecord(OperationContext* txn, const DiskLoc& loc);
+ virtual void setCapFirstNewRecord(OperationContext* opCtx, const DiskLoc& loc);
virtual long long dataSize() const;
virtual long long numRecords() const;
- virtual void incrementStats(OperationContext* txn,
+ virtual void incrementStats(OperationContext* opCtx,
long long dataSizeIncrement,
long long numRecordsIncrement);
- virtual void setStats(OperationContext* txn, long long dataSize, long long numRecords);
+ virtual void setStats(OperationContext* opCtx, long long dataSize, long long numRecords);
virtual DiskLoc deletedListEntry(int bucket) const;
- virtual void setDeletedListEntry(OperationContext* txn, int bucket, const DiskLoc& loc);
+ virtual void setDeletedListEntry(OperationContext* opCtx, int bucket, const DiskLoc& loc);
virtual DiskLoc deletedListLegacyGrabBag() const;
- virtual void setDeletedListLegacyGrabBag(OperationContext* txn, const DiskLoc& loc);
+ virtual void setDeletedListLegacyGrabBag(OperationContext* opCtx, const DiskLoc& loc);
- virtual void orphanDeletedList(OperationContext* txn);
+ virtual void orphanDeletedList(OperationContext* opCtx);
- virtual const DiskLoc& firstExtent(OperationContext* txn) const;
- virtual void setFirstExtent(OperationContext* txn, const DiskLoc& loc);
+ virtual const DiskLoc& firstExtent(OperationContext* opCtx) const;
+ virtual void setFirstExtent(OperationContext* opCtx, const DiskLoc& loc);
- virtual const DiskLoc& lastExtent(OperationContext* txn) const;
- virtual void setLastExtent(OperationContext* txn, const DiskLoc& loc);
+ virtual const DiskLoc& lastExtent(OperationContext* opCtx) const;
+ virtual void setLastExtent(OperationContext* opCtx, const DiskLoc& loc);
virtual bool isCapped() const;
@@ -78,13 +78,13 @@ public:
virtual int userFlags() const {
return _userFlags;
}
- virtual bool setUserFlag(OperationContext* txn, int flag);
- virtual bool clearUserFlag(OperationContext* txn, int flag);
- virtual bool replaceUserFlags(OperationContext* txn, int flags);
+ virtual bool setUserFlag(OperationContext* opCtx, int flag);
+ virtual bool clearUserFlag(OperationContext* opCtx, int flag);
+ virtual bool replaceUserFlags(OperationContext* opCtx, int flags);
- virtual int lastExtentSize(OperationContext* txn) const;
- virtual void setLastExtentSize(OperationContext* txn, int newMax);
+ virtual int lastExtentSize(OperationContext* opCtx) const;
+ virtual void setLastExtentSize(OperationContext* opCtx, int newMax);
virtual long long maxCappedDocs() const;
@@ -113,20 +113,23 @@ class DummyExtentManager : public ExtentManager {
public:
virtual ~DummyExtentManager();
- virtual void close(OperationContext* txn);
+ virtual void close(OperationContext* opCtx);
- virtual Status init(OperationContext* txn);
+ virtual Status init(OperationContext* opCtx);
virtual int numFiles() const;
virtual long long fileSize() const;
- virtual DiskLoc allocateExtent(OperationContext* txn, bool capped, int size, bool enforceQuota);
+ virtual DiskLoc allocateExtent(OperationContext* opCtx,
+ bool capped,
+ int size,
+ bool enforceQuota);
- virtual void freeExtents(OperationContext* txn, DiskLoc firstExt, DiskLoc lastExt);
+ virtual void freeExtents(OperationContext* opCtx, DiskLoc firstExt, DiskLoc lastExt);
- virtual void freeExtent(OperationContext* txn, DiskLoc extent);
+ virtual void freeExtent(OperationContext* opCtx, DiskLoc extent);
- virtual void freeListStats(OperationContext* txn,
+ virtual void freeListStats(OperationContext* opCtx,
int* numExtents,
int64_t* totalFreeSizeBytes) const;
@@ -144,9 +147,9 @@ public:
virtual CacheHint* cacheHint(const DiskLoc& extentLoc, const HintType& hint);
- DataFileVersion getFileFormat(OperationContext* txn) const final;
+ DataFileVersion getFileFormat(OperationContext* opCtx) const final;
- virtual void setFileFormat(OperationContext* txn, DataFileVersion newVersion) final;
+ virtual void setFileFormat(OperationContext* opCtx, DataFileVersion newVersion) final;
const DataFile* getOpenFile(int n) const final;
@@ -184,7 +187,7 @@ struct LocAndSize {
*
* ExtentManager and MetaData must both be empty.
*/
-void initializeV1RS(OperationContext* txn,
+void initializeV1RS(OperationContext* opCtx,
const LocAndSize* records,
const LocAndSize* drecs,
const LocAndSize* legacyGrabBag,
@@ -198,7 +201,7 @@ void initializeV1RS(OperationContext* txn,
* List of LocAndSize are terminated by a Null DiskLoc. Passing a NULL pointer means don't check
* that list.
*/
-void assertStateV1RS(OperationContext* txn,
+void assertStateV1RS(OperationContext* opCtx,
const LocAndSize* records,
const LocAndSize* drecs,
const LocAndSize* legacyGrabBag,
diff --git a/src/mongo/db/storage/mmap_v1/repair_database.cpp b/src/mongo/db/storage/mmap_v1/repair_database.cpp
index ea76462eaad..c321cd5513c 100644
--- a/src/mongo/db/storage/mmap_v1/repair_database.cpp
+++ b/src/mongo/db/storage/mmap_v1/repair_database.cpp
@@ -236,11 +236,11 @@ void _applyOpToDataFiles(const string& database,
class RepairFileDeleter {
public:
- RepairFileDeleter(OperationContext* txn,
+ RepairFileDeleter(OperationContext* opCtx,
const string& dbName,
const string& pathString,
const Path& path)
- : _txn(txn), _dbName(dbName), _pathString(pathString), _path(path), _success(false) {}
+ : _opCtx(opCtx), _dbName(dbName), _pathString(pathString), _path(path), _success(false) {}
~RepairFileDeleter() {
if (_success)
@@ -250,10 +250,10 @@ public:
<< "db: " << _dbName << " path: " << _pathString;
try {
- getDur().syncDataAndTruncateJournal(_txn);
+ getDur().syncDataAndTruncateJournal(_opCtx);
// need both in case journaling is disabled
- MongoFile::flushAll(_txn, true);
+ MongoFile::flushAll(_opCtx, true);
MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::remove_all(_path));
} catch (DBException& e) {
@@ -268,21 +268,21 @@ public:
}
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
string _dbName;
string _pathString;
Path _path;
bool _success;
};
-Status MMAPV1Engine::repairDatabase(OperationContext* txn,
+Status MMAPV1Engine::repairDatabase(OperationContext* opCtx,
const std::string& dbName,
bool preserveClonedFilesOnFailure,
bool backupOriginalFiles) {
unique_ptr<RepairFileDeleter> repairFileDeleter;
// Must be done before and after repair
- getDur().syncDataAndTruncateJournal(txn);
+ getDur().syncDataAndTruncateJournal(opCtx);
intmax_t totalSize = dbSize(dbName);
intmax_t freeSize = File::freeSpace(storageGlobalParams.repairpath);
@@ -296,7 +296,7 @@ Status MMAPV1Engine::repairDatabase(OperationContext* txn,
<< " (bytes)");
}
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
Path reservedPath = uniqueReservedPath(
(preserveClonedFilesOnFailure || backupOriginalFiles) ? "backup" : "_tmp");
@@ -307,10 +307,10 @@ Status MMAPV1Engine::repairDatabase(OperationContext* txn,
if (!preserveClonedFilesOnFailure)
repairFileDeleter.reset(
- new RepairFileDeleter(txn, dbName, reservedPathString, reservedPath));
+ new RepairFileDeleter(opCtx, dbName, reservedPathString, reservedPath));
{
- Database* originalDatabase = dbHolder().openDb(txn, dbName);
+ Database* originalDatabase = dbHolder().openDb(opCtx, dbName);
if (originalDatabase == NULL) {
return Status(ErrorCodes::NamespaceNotFound, "database does not exist to repair");
}
@@ -319,30 +319,30 @@ Status MMAPV1Engine::repairDatabase(OperationContext* txn,
unique_ptr<Database> tempDatabase;
// Must call this before MMAPV1DatabaseCatalogEntry's destructor closes the DB files
- ON_BLOCK_EXIT([&dbEntry, &txn] {
- getDur().syncDataAndTruncateJournal(txn);
- dbEntry->close(txn);
+ ON_BLOCK_EXIT([&dbEntry, &opCtx] {
+ getDur().syncDataAndTruncateJournal(opCtx);
+ dbEntry->close(opCtx);
});
{
dbEntry.reset(new MMAPV1DatabaseCatalogEntry(
- txn,
+ opCtx,
dbName,
reservedPathString,
storageGlobalParams.directoryperdb,
true,
_extentManagerFactory->create(
dbName, reservedPathString, storageGlobalParams.directoryperdb)));
- tempDatabase.reset(new Database(txn, dbName, dbEntry.get()));
+ tempDatabase.reset(new Database(opCtx, dbName, dbEntry.get()));
}
map<string, CollectionOptions> namespacesToCopy;
{
string ns = dbName + ".system.namespaces";
- OldClientContext ctx(txn, ns);
+ OldClientContext ctx(opCtx, ns);
Collection* coll = originalDatabase->getCollection(ns);
if (coll) {
- auto cursor = coll->getCursor(txn);
+ auto cursor = coll->getCursor(opCtx);
while (auto record = cursor->next()) {
BSONObj obj = record->data.releaseToBson();
@@ -378,23 +378,23 @@ Status MMAPV1Engine::repairDatabase(OperationContext* txn,
Collection* tempCollection = NULL;
{
- WriteUnitOfWork wunit(txn);
- tempCollection = tempDatabase->createCollection(txn, ns, options, false);
+ WriteUnitOfWork wunit(opCtx);
+ tempCollection = tempDatabase->createCollection(opCtx, ns, options, false);
wunit.commit();
}
- OldClientContext readContext(txn, ns, originalDatabase);
+ OldClientContext readContext(opCtx, ns, originalDatabase);
Collection* originalCollection = originalDatabase->getCollection(ns);
invariant(originalCollection);
// data
// TODO SERVER-14812 add a mode that drops duplicates rather than failing
- MultiIndexBlock indexer(txn, tempCollection);
+ MultiIndexBlock indexer(opCtx, tempCollection);
{
vector<BSONObj> indexes;
IndexCatalog::IndexIterator ii =
- originalCollection->getIndexCatalog()->getIndexIterator(txn, false);
+ originalCollection->getIndexCatalog()->getIndexIterator(opCtx, false);
while (ii.more()) {
IndexDescriptor* desc = ii.next();
indexes.push_back(desc->infoObj());
@@ -407,17 +407,17 @@ Status MMAPV1Engine::repairDatabase(OperationContext* txn,
}
std::vector<MultiIndexBlock*> indexers{&indexer};
- auto cursor = originalCollection->getCursor(txn);
+ auto cursor = originalCollection->getCursor(opCtx);
while (auto record = cursor->next()) {
BSONObj doc = record->data.releaseToBson();
- WriteUnitOfWork wunit(txn);
- Status status = tempCollection->insertDocument(txn, doc, indexers, false);
+ WriteUnitOfWork wunit(opCtx);
+ Status status = tempCollection->insertDocument(opCtx, doc, indexers, false);
if (!status.isOK())
return status;
wunit.commit();
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
}
Status status = indexer.doneInserting();
@@ -425,18 +425,18 @@ Status MMAPV1Engine::repairDatabase(OperationContext* txn,
return status;
{
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
indexer.commit();
wunit.commit();
}
}
- getDur().syncDataAndTruncateJournal(txn);
+ getDur().syncDataAndTruncateJournal(opCtx);
// need both in case journaling is disabled
- MongoFile::flushAll(txn, true);
+ MongoFile::flushAll(opCtx, true);
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
}
// at this point if we abort, we don't want to delete new files
@@ -446,7 +446,7 @@ Status MMAPV1Engine::repairDatabase(OperationContext* txn,
repairFileDeleter->success();
// Close the database so we can rename/delete the original data files
- dbHolder().close(txn, dbName);
+ dbHolder().close(opCtx, dbName);
if (backupOriginalFiles) {
_renameForBackup(dbName, reservedPath);
@@ -472,7 +472,7 @@ Status MMAPV1Engine::repairDatabase(OperationContext* txn,
}
// Reopen the database so it's discoverable
- dbHolder().openDb(txn, dbName);
+ dbHolder().openDb(opCtx, dbName);
return Status::OK();
}
diff --git a/src/mongo/db/storage/record_fetcher.h b/src/mongo/db/storage/record_fetcher.h
index e133e28bdf0..0c8d5f18080 100644
--- a/src/mongo/db/storage/record_fetcher.h
+++ b/src/mongo/db/storage/record_fetcher.h
@@ -44,7 +44,7 @@ public:
/**
* Performs any setup which is needed prior to yielding locks.
*/
- virtual void setup(OperationContext* txn) = 0;
+ virtual void setup(OperationContext* opCtx) = 0;
/**
* Called after locks are yielded in order to bring data into memory.
diff --git a/src/mongo/db/storage/record_store.h b/src/mongo/db/storage/record_store.h
index f17e07bfa59..c321938af06 100644
--- a/src/mongo/db/storage/record_store.h
+++ b/src/mongo/db/storage/record_store.h
@@ -78,7 +78,8 @@ protected:
class UpdateNotifier {
public:
virtual ~UpdateNotifier() {}
- virtual Status recordStoreGoingToUpdateInPlace(OperationContext* txn, const RecordId& loc) = 0;
+ virtual Status recordStoreGoingToUpdateInPlace(OperationContext* opCtx,
+ const RecordId& loc) = 0;
};
/**
@@ -199,12 +200,12 @@ public:
/**
* Inform the cursor that this id is being invalidated. Must be called between save and restore.
- * The txn is that of the operation causing the invalidation, not the txn using the cursor.
+ * The opCtx is that of the operation causing the invalidation, not the opCtx using the cursor.
*
* WARNING: Storage engines other than MMAPv1 should use the default implementation,
* and not depend on this being called.
*/
- virtual void invalidate(OperationContext* txn, const RecordId& id) {}
+ virtual void invalidate(OperationContext* opCtx, const RecordId& id) {}
//
// RecordFetchers
@@ -299,13 +300,13 @@ public:
* The dataSize is an approximation of the sum of the sizes (in bytes) of the
* documents or entries in the recordStore.
*/
- virtual long long dataSize(OperationContext* txn) const = 0;
+ virtual long long dataSize(OperationContext* opCtx) const = 0;
/**
* Total number of record in the RecordStore. You may need to cache it, so this call
* takes constant time, as it is called often.
*/
- virtual long long numRecords(OperationContext* txn) const = 0;
+ virtual long long numRecords(OperationContext* opCtx) const = 0;
virtual bool isCapped() const = 0;
@@ -318,7 +319,7 @@ public:
* @param level - optional, level of debug info to put in (higher is more)
* @return total estimate size (in bytes) on stable storage
*/
- virtual int64_t storageSize(OperationContext* txn,
+ virtual int64_t storageSize(OperationContext* opCtx,
BSONObjBuilder* extraInfo = NULL,
int infoLevel = 0) const = 0;
@@ -333,9 +334,9 @@ public:
* In general, prefer findRecord or RecordCursor::seekExact since they can tell you if a
* record has been removed.
*/
- virtual RecordData dataFor(OperationContext* txn, const RecordId& loc) const {
+ virtual RecordData dataFor(OperationContext* opCtx, const RecordId& loc) const {
RecordData data;
- invariant(findRecord(txn, loc, &data));
+ invariant(findRecord(opCtx, loc, &data));
return data;
}
@@ -353,8 +354,8 @@ public:
* potentially deleted RecordIds to seek methods if they know that MMAPv1 is not the current
* storage engine. All new storage engines must support detecting the existence of Records.
*/
- virtual bool findRecord(OperationContext* txn, const RecordId& loc, RecordData* out) const {
- auto cursor = getCursor(txn);
+ virtual bool findRecord(OperationContext* opCtx, const RecordId& loc, RecordData* out) const {
+ auto cursor = getCursor(opCtx);
auto record = cursor->seekExact(loc);
if (!record)
return false;
@@ -364,19 +365,19 @@ public:
return true;
}
- virtual void deleteRecord(OperationContext* txn, const RecordId& dl) = 0;
+ virtual void deleteRecord(OperationContext* opCtx, const RecordId& dl) = 0;
- virtual StatusWith<RecordId> insertRecord(OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord(OperationContext* opCtx,
const char* data,
int len,
bool enforceQuota) = 0;
- virtual Status insertRecords(OperationContext* txn,
+ virtual Status insertRecords(OperationContext* opCtx,
std::vector<Record>* records,
bool enforceQuota) {
for (auto& record : *records) {
StatusWith<RecordId> res =
- insertRecord(txn, record.data.data(), record.data.size(), enforceQuota);
+ insertRecord(opCtx, record.data.data(), record.data.size(), enforceQuota);
if (!res.isOK())
return res.getStatus();
@@ -394,7 +395,7 @@ public:
* On success, if idsOut is non-null the RecordIds of the inserted records will be written into
* it. It must have space for nDocs RecordIds.
*/
- virtual Status insertRecordsWithDocWriter(OperationContext* txn,
+ virtual Status insertRecordsWithDocWriter(OperationContext* opCtx,
const DocWriter* const* docs,
size_t nDocs,
RecordId* idsOut = nullptr) = 0;
@@ -402,9 +403,9 @@ public:
/**
* A thin wrapper around insertRecordsWithDocWriter() to simplify handling of single DocWriters.
*/
- StatusWith<RecordId> insertRecordWithDocWriter(OperationContext* txn, const DocWriter* doc) {
+ StatusWith<RecordId> insertRecordWithDocWriter(OperationContext* opCtx, const DocWriter* doc) {
RecordId out;
- Status status = insertRecordsWithDocWriter(txn, &doc, 1, &out);
+ Status status = insertRecordsWithDocWriter(opCtx, &doc, 1, &out);
if (!status.isOK())
return status;
return out;
@@ -422,7 +423,7 @@ public:
*
* For capped record stores, the record size will never change.
*/
- virtual Status updateRecord(OperationContext* txn,
+ virtual Status updateRecord(OperationContext* opCtx,
const RecordId& oldLocation,
const char* data,
int len,
@@ -446,7 +447,7 @@ public:
* @return the updated version of the record. If unowned data is returned, then it is valid
* until the next modification of this Record or the lock on the collection has been released.
*/
- virtual StatusWith<RecordData> updateWithDamages(OperationContext* txn,
+ virtual StatusWith<RecordData> updateWithDamages(OperationContext* opCtx,
const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
@@ -460,7 +461,7 @@ public:
* are allowed to lazily seek to the first Record when next() is called rather than doing
* it on construction.
*/
- virtual std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* txn,
+ virtual std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* opCtx,
bool forward = true) const = 0;
/**
@@ -468,7 +469,7 @@ public:
* damaged records. The iterator might return every record in the store if all of them
* are reachable and not corrupted. Returns NULL if not supported.
*/
- virtual std::unique_ptr<RecordCursor> getCursorForRepair(OperationContext* txn) const {
+ virtual std::unique_ptr<RecordCursor> getCursorForRepair(OperationContext* opCtx) const {
return {};
}
@@ -483,7 +484,7 @@ public:
* the record store. Implementations should avoid obvious biases toward older, newer, larger
* smaller or other specific classes of documents.
*/
- virtual std::unique_ptr<RecordCursor> getRandomCursor(OperationContext* txn) const {
+ virtual std::unique_ptr<RecordCursor> getRandomCursor(OperationContext* opCtx) const {
return {};
}
@@ -491,9 +492,10 @@ public:
* Returns many RecordCursors that partition the RecordStore into many disjoint sets.
* Iterating all returned RecordCursors is equivalent to iterating the full store.
*/
- virtual std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* txn) const {
+ virtual std::vector<std::unique_ptr<RecordCursor>> getManyCursors(
+ OperationContext* opCtx) const {
std::vector<std::unique_ptr<RecordCursor>> out(1);
- out[0] = getCursor(txn);
+ out[0] = getCursor(opCtx);
return out;
}
@@ -503,7 +505,7 @@ public:
/**
* removes all Records
*/
- virtual Status truncate(OperationContext* txn) = 0;
+ virtual Status truncate(OperationContext* opCtx) = 0;
/**
* Truncate documents newer than the document at 'end' from the capped
@@ -511,7 +513,7 @@ public:
* function. An assertion will be thrown if that is attempted.
* @param inclusive - Truncate 'end' as well iff true
*/
- virtual void cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive) = 0;
+ virtual void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) = 0;
/**
* does this RecordStore support the compact operation?
@@ -537,7 +539,7 @@ public:
* Only called if compactSupported() returns true.
* No RecordStoreCompactAdaptor will be passed if compactsInPlace() returns true.
*/
- virtual Status compact(OperationContext* txn,
+ virtual Status compact(OperationContext* opCtx,
RecordStoreCompactAdaptor* adaptor,
const CompactOptions* options,
CompactStats* stats) {
@@ -549,7 +551,7 @@ public:
* OK will be returned even if corruption is found
* deatils will be in result
*/
- virtual Status validate(OperationContext* txn,
+ virtual Status validate(OperationContext* opCtx,
ValidateCmdLevel level,
ValidateAdaptor* adaptor,
ValidateResults* results,
@@ -559,7 +561,7 @@ public:
* @param scaleSize - amount by which to scale size metrics
* appends any custom stats from the RecordStore or other unique stats
*/
- virtual void appendCustomStats(OperationContext* txn,
+ virtual void appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* result,
double scale) const = 0;
@@ -572,7 +574,7 @@ public:
*
* @param output (optional) - where to put detailed stats
*/
- virtual Status touch(OperationContext* txn, BSONObjBuilder* output) const {
+ virtual Status touch(OperationContext* opCtx, BSONObjBuilder* output) const {
return Status(ErrorCodes::CommandNotSupported,
"this storage engine does not support touch");
}
@@ -584,7 +586,7 @@ public:
* If you don't implement the oplogStartHack, just use the default implementation which
* returns boost::none.
*/
- virtual boost::optional<RecordId> oplogStartHack(OperationContext* txn,
+ virtual boost::optional<RecordId> oplogStartHack(OperationContext* opCtx,
const RecordId& startingPosition) const {
return boost::none;
}
@@ -597,7 +599,7 @@ public:
* Since this is called inside of a WriteUnitOfWork while holding a std::mutex, it is
* illegal to acquire any LockManager locks inside of this function.
*/
- virtual Status oplogDiskLocRegister(OperationContext* txn, const Timestamp& opTime) {
+ virtual Status oplogDiskLocRegister(OperationContext* opCtx, const Timestamp& opTime) {
return Status::OK();
}
@@ -608,12 +610,12 @@ public:
* It is only legal to call this on an oplog. It is illegal to call this inside a
* WriteUnitOfWork.
*/
- virtual void waitForAllEarlierOplogWritesToBeVisible(OperationContext* txn) const = 0;
+ virtual void waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx) const = 0;
/**
* Called after a repair operation is run with the recomputed numRecords and dataSize.
*/
- virtual void updateStatsAfterRepair(OperationContext* txn,
+ virtual void updateStatsAfterRepair(OperationContext* opCtx,
long long numRecords,
long long dataSize) = 0;
diff --git a/src/mongo/db/storage/record_store_test_capped_visibility.cpp b/src/mongo/db/storage/record_store_test_capped_visibility.cpp
index 3669edb391f..8ced75f97be 100644
--- a/src/mongo/db/storage/record_store_test_capped_visibility.cpp
+++ b/src/mongo/db/storage/record_store_test_capped_visibility.cpp
@@ -40,9 +40,9 @@
namespace mongo {
namespace {
-RecordId doInsert(unowned_ptr<OperationContext> txn, unowned_ptr<RecordStore> rs) {
+RecordId doInsert(unowned_ptr<OperationContext> opCtx, unowned_ptr<RecordStore> rs) {
static char zeros[16];
- return uassertStatusOK(rs->insertRecord(txn, zeros, sizeof(zeros), false));
+ return uassertStatusOK(rs->insertRecord(opCtx, zeros, sizeof(zeros), false));
}
// macro to keep assert line numbers correct.
@@ -76,27 +76,27 @@ TEST(RecordStore_CappedVisibility, EmptyInitialState) {
RecordId otherId;
{
- auto txn = harness->newOperationContext();
- WriteUnitOfWork wuow(txn.get());
+ auto opCtx = harness->newOperationContext();
+ WriteUnitOfWork wuow(opCtx.get());
// Can't see uncommitted write from other operation.
- ASSERT(!rs->getCursor(txn.get())->seekExact(lowestHiddenId));
+ ASSERT(!rs->getCursor(opCtx.get())->seekExact(lowestHiddenId));
- ASSERT(!rs->getCursor(txn.get(), true)->next());
- ASSERT(!rs->getCursor(txn.get(), false)->next());
+ ASSERT(!rs->getCursor(opCtx.get(), true)->next());
+ ASSERT(!rs->getCursor(opCtx.get(), false)->next());
- otherId = doInsert(txn, rs);
+ otherId = doInsert(opCtx, rs);
- ASSERT(!rs->getCursor(txn.get(), true)->next());
- ASSERT_ID_EQ(rs->getCursor(txn.get(), false)->next(), otherId);
- ASSERT_ID_EQ(rs->getCursor(txn.get())->seekExact(otherId), otherId);
+ ASSERT(!rs->getCursor(opCtx.get(), true)->next());
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get(), false)->next(), otherId);
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get())->seekExact(otherId), otherId);
wuow.commit();
- ASSERT(!rs->getCursor(txn.get(), true)->next());
- ASSERT_ID_EQ(rs->getCursor(txn.get(), false)->next(), otherId);
- ASSERT_ID_EQ(rs->getCursor(txn.get())->seekExact(otherId), otherId);
- ASSERT(!rs->getCursor(txn.get())->seekExact(lowestHiddenId));
+ ASSERT(!rs->getCursor(opCtx.get(), true)->next());
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get(), false)->next(), otherId);
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get())->seekExact(otherId), otherId);
+ ASSERT(!rs->getCursor(opCtx.get())->seekExact(lowestHiddenId));
}
// longLivedOp is still on old snapshot so it can't see otherId yet.
@@ -147,28 +147,28 @@ TEST(RecordStore_CappedVisibility, NonEmptyInitialState) {
RecordId otherId;
{
- auto txn = harness->newOperationContext();
- WriteUnitOfWork wuow(txn.get());
+ auto opCtx = harness->newOperationContext();
+ WriteUnitOfWork wuow(opCtx.get());
// Can only see committed writes from other operation.
- ASSERT_ID_EQ(rs->getCursor(txn.get())->seekExact(initialId), initialId);
- ASSERT(!rs->getCursor(txn.get())->seekExact(lowestHiddenId));
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get())->seekExact(initialId), initialId);
+ ASSERT(!rs->getCursor(opCtx.get())->seekExact(lowestHiddenId));
- ASSERT_ID_EQ(rs->getCursor(txn.get(), true)->next(), initialId);
- ASSERT_ID_EQ(rs->getCursor(txn.get(), false)->next(), initialId);
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get(), true)->next(), initialId);
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get(), false)->next(), initialId);
- otherId = doInsert(txn, rs);
+ otherId = doInsert(opCtx, rs);
- ASSERT_ID_EQ(rs->getCursor(txn.get(), true)->next(), initialId);
- ASSERT_ID_EQ(rs->getCursor(txn.get(), false)->next(), otherId);
- ASSERT_ID_EQ(rs->getCursor(txn.get())->seekExact(otherId), otherId);
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get(), true)->next(), initialId);
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get(), false)->next(), otherId);
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get())->seekExact(otherId), otherId);
wuow.commit();
- ASSERT_ID_EQ(rs->getCursor(txn.get(), true)->next(), initialId);
- ASSERT_ID_EQ(rs->getCursor(txn.get(), false)->next(), otherId);
- ASSERT_ID_EQ(rs->getCursor(txn.get())->seekExact(otherId), otherId);
- ASSERT(!rs->getCursor(txn.get())->seekExact(lowestHiddenId));
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get(), true)->next(), initialId);
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get(), false)->next(), otherId);
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get())->seekExact(otherId), otherId);
+ ASSERT(!rs->getCursor(opCtx.get())->seekExact(lowestHiddenId));
}
// longLivedOp is still on old snapshot so it can't see otherId yet.
diff --git a/src/mongo/db/storage/record_store_test_updaterecord.h b/src/mongo/db/storage/record_store_test_updaterecord.h
index be52887cf2b..e20b32bcce4 100644
--- a/src/mongo/db/storage/record_store_test_updaterecord.h
+++ b/src/mongo/db/storage/record_store_test_updaterecord.h
@@ -40,14 +40,14 @@ namespace {
class UpdateNotifierSpy : public UpdateNotifier {
public:
- UpdateNotifierSpy(OperationContext* txn, const RecordId& loc, const char* buf, size_t size)
- : _txn(txn), _loc(loc), _data(buf, size), nInPlaceCalls(0) {}
+ UpdateNotifierSpy(OperationContext* opCtx, const RecordId& loc, const char* buf, size_t size)
+ : _opCtx(opCtx), _loc(loc), _data(buf, size), nInPlaceCalls(0) {}
~UpdateNotifierSpy() {}
- Status recordStoreGoingToUpdateInPlace(OperationContext* txn, const RecordId& loc) {
+ Status recordStoreGoingToUpdateInPlace(OperationContext* opCtx, const RecordId& loc) {
nInPlaceCalls++;
- ASSERT_EQUALS(_txn, txn);
+ ASSERT_EQUALS(_opCtx, opCtx);
ASSERT_EQUALS(_loc, loc);
return Status::OK();
}
@@ -57,7 +57,7 @@ public:
}
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
RecordId _loc;
std::string _data;
diff --git a/src/mongo/db/storage/snapshot_manager.h b/src/mongo/db/storage/snapshot_manager.h
index 8d4b81fd5c3..ef588d1c1a2 100644
--- a/src/mongo/db/storage/snapshot_manager.h
+++ b/src/mongo/db/storage/snapshot_manager.h
@@ -58,7 +58,7 @@ public:
* This must be the first method called after starting a ScopedTransaction, and it is
* illegal to start a WriteUnitOfWork inside of the same ScopedTransaction.
*/
- virtual Status prepareForCreateSnapshot(OperationContext* txn) = 0;
+ virtual Status prepareForCreateSnapshot(OperationContext* opCtx) = 0;
/**
* Creates a new named snapshot representing the same point-in-time captured in
@@ -68,7 +68,7 @@ public:
*
* Caller guarantees that this name must compare greater than all existing snapshots.
*/
- virtual Status createSnapshot(OperationContext* txn, const SnapshotName& name) = 0;
+ virtual Status createSnapshot(OperationContext* opCtx, const SnapshotName& name) = 0;
/**
* Sets the snapshot to be used for committed reads.
diff --git a/src/mongo/db/storage/sorted_data_interface.h b/src/mongo/db/storage/sorted_data_interface.h
index d21fb6e3c8f..7dfcc8554f3 100644
--- a/src/mongo/db/storage/sorted_data_interface.h
+++ b/src/mongo/db/storage/sorted_data_interface.h
@@ -75,18 +75,19 @@ public:
* Implementations can assume that 'this' index outlives its bulk
* builder.
*
- * @param txn the transaction under which keys are added to 'this' index
+ * @param opCtx the transaction under which keys are added to 'this' index
* @param dupsAllowed true if duplicate keys are allowed, and false
* otherwise
*
* @return caller takes ownership
*/
- virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn, bool dupsAllowed) = 0;
+ virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* opCtx,
+ bool dupsAllowed) = 0;
/**
* Insert an entry into the index with the specified key and RecordId.
*
- * @param txn the transaction under which the insert takes place
+ * @param opCtx the transaction under which the insert takes place
* @param dupsAllowed true if duplicate keys are allowed, and false
* otherwise
*
@@ -95,7 +96,7 @@ public:
* ErrorCodes::DuplicateKey if 'key' already exists in 'this' index
* at a RecordId other than 'loc' and duplicates were not allowed
*/
- virtual Status insert(OperationContext* txn,
+ virtual Status insert(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
bool dupsAllowed) = 0;
@@ -103,11 +104,11 @@ public:
/**
* Remove the entry from the index with the specified key and RecordId.
*
- * @param txn the transaction under which the remove takes place
+ * @param opCtx the transaction under which the remove takes place
* @param dupsAllowed true if duplicate keys are allowed, and false
* otherwise
*/
- virtual void unindex(OperationContext* txn,
+ virtual void unindex(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
bool dupsAllowed) = 0;
@@ -116,17 +117,19 @@ public:
* Return ErrorCodes::DuplicateKey if 'key' already exists in 'this'
* index at a RecordId other than 'loc', and Status::OK() otherwise.
*
- * @param txn the transaction under which this operation takes place
+ * @param opCtx the transaction under which this operation takes place
*
* TODO: Hide this by exposing an update method?
*/
- virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& loc) = 0;
+ virtual Status dupKeyCheck(OperationContext* opCtx,
+ const BSONObj& key,
+ const RecordId& loc) = 0;
/**
* Attempt to reduce the storage space used by this index via compaction. Only called if the
* indexed record store supports compaction-in-place.
*/
- virtual Status compact(OperationContext* txn) {
+ virtual Status compact(OperationContext* opCtx) {
return Status::OK();
}
@@ -137,11 +140,11 @@ public:
/**
* TODO: expose full set of args for testing?
*/
- virtual void fullValidate(OperationContext* txn,
+ virtual void fullValidate(OperationContext* opCtx,
long long* numKeysOut,
ValidateResults* fullResults) const = 0;
- virtual bool appendCustomStats(OperationContext* txn,
+ virtual bool appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* output,
double scale) const = 0;
@@ -149,16 +152,16 @@ public:
/**
* Return the number of bytes consumed by 'this' index.
*
- * @param txn the transaction under which this operation takes place
+ * @param opCtx the transaction under which this operation takes place
*
* @see IndexAccessMethod::getSpaceUsedBytes
*/
- virtual long long getSpaceUsedBytes(OperationContext* txn) const = 0;
+ virtual long long getSpaceUsedBytes(OperationContext* opCtx) const = 0;
/**
* Return true if 'this' index is empty, and false otherwise.
*/
- virtual bool isEmpty(OperationContext* txn) = 0;
+ virtual bool isEmpty(OperationContext* opCtx) = 0;
/**
* Attempt to bring the entirety of 'this' index into memory.
@@ -168,7 +171,7 @@ public:
*
* @return Status::OK()
*/
- virtual Status touch(OperationContext* txn) const {
+ virtual Status touch(OperationContext* opCtx) const {
return Status(ErrorCodes::CommandNotSupported,
"this storage engine does not support touch");
}
@@ -179,9 +182,9 @@ public:
* The default implementation should be overridden with a more
* efficient one if at all possible.
*/
- virtual long long numEntries(OperationContext* txn) const {
+ virtual long long numEntries(OperationContext* opCtx) const {
long long x = -1;
- fullValidate(txn, &x, NULL);
+ fullValidate(opCtx, &x, NULL);
return x;
}
@@ -357,7 +360,7 @@ public:
*
* Implementations can assume that 'this' index outlives all cursors it produces.
*/
- virtual std::unique_ptr<Cursor> newCursor(OperationContext* txn,
+ virtual std::unique_ptr<Cursor> newCursor(OperationContext* opCtx,
bool isForward = true) const = 0;
/**
@@ -374,7 +377,7 @@ public:
* Implementations should avoid obvious biases toward older, newer, larger smaller or other
* specific classes of entries.
*/
- virtual std::unique_ptr<Cursor> newRandomCursor(OperationContext* txn) const {
+ virtual std::unique_ptr<Cursor> newRandomCursor(OperationContext* opCtx) const {
return {};
}
@@ -382,7 +385,7 @@ public:
// Index creation
//
- virtual Status initAsEmpty(OperationContext* txn) = 0;
+ virtual Status initAsEmpty(OperationContext* opCtx) = 0;
};
/**
diff --git a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
index 57461bcf0d4..5c034ceedbc 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
@@ -47,22 +47,22 @@ auto mongo::SortedDataInterfaceHarnessHelper::newSortedDataInterface(
return index;
}
-void mongo::insertToIndex(unowned_ptr<OperationContext> txn,
+void mongo::insertToIndex(unowned_ptr<OperationContext> opCtx,
unowned_ptr<SortedDataInterface> index,
std::initializer_list<IndexKeyEntry> toInsert) {
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
for (auto&& entry : toInsert) {
- ASSERT_OK(index->insert(txn, entry.key, entry.loc, true));
+ ASSERT_OK(index->insert(opCtx, entry.key, entry.loc, true));
}
wuow.commit();
}
-void mongo::removeFromIndex(unowned_ptr<OperationContext> txn,
+void mongo::removeFromIndex(unowned_ptr<OperationContext> opCtx,
unowned_ptr<SortedDataInterface> index,
std::initializer_list<IndexKeyEntry> toRemove) {
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
for (auto&& entry : toRemove) {
- index->unindex(txn, entry.key, entry.loc, true);
+ index->unindex(opCtx, entry.key, entry.loc, true);
}
wuow.commit();
}
diff --git a/src/mongo/db/storage/sorted_data_interface_test_harness.h b/src/mongo/db/storage/sorted_data_interface_test_harness.h
index def0ed88813..e6f9443fd23 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_harness.h
+++ b/src/mongo/db/storage/sorted_data_interface_test_harness.h
@@ -105,7 +105,7 @@ public:
*
* Should be used for declaring and changing conditions, not for testing inserts.
*/
-void insertToIndex(unowned_ptr<OperationContext> txn,
+void insertToIndex(unowned_ptr<OperationContext> opCtx,
unowned_ptr<SortedDataInterface> index,
std::initializer_list<IndexKeyEntry> toInsert);
@@ -122,7 +122,7 @@ inline void insertToIndex(unowned_ptr<HarnessHelper> harness,
*
* Should be used for declaring and changing conditions, not for testing removes.
*/
-void removeFromIndex(unowned_ptr<OperationContext> txn,
+void removeFromIndex(unowned_ptr<OperationContext> opCtx,
unowned_ptr<SortedDataInterface> index,
std::initializer_list<IndexKeyEntry> toRemove);
diff --git a/src/mongo/db/storage/storage_engine.h b/src/mongo/db/storage/storage_engine.h
index 02e9c1ff7aa..f3f00631c8d 100644
--- a/src/mongo/db/storage/storage_engine.h
+++ b/src/mongo/db/storage/storage_engine.h
@@ -197,17 +197,17 @@ public:
/**
* Closes all file handles associated with a database.
*/
- virtual Status closeDatabase(OperationContext* txn, StringData db) = 0;
+ virtual Status closeDatabase(OperationContext* opCtx, StringData db) = 0;
/**
* Deletes all data and metadata for a database.
*/
- virtual Status dropDatabase(OperationContext* txn, StringData db) = 0;
+ virtual Status dropDatabase(OperationContext* opCtx, StringData db) = 0;
/**
* @return number of files flushed
*/
- virtual int flushAllFiles(OperationContext* txn, bool sync) = 0;
+ virtual int flushAllFiles(OperationContext* opCtx, bool sync) = 0;
/**
* Transitions the storage engine into backup mode.
@@ -228,7 +228,7 @@ public:
* retried, returns a non-OK status. This function may throw a WriteConflictException, which
* should trigger a retry by the caller. All other exceptions should be treated as errors.
*/
- virtual Status beginBackup(OperationContext* txn) {
+ virtual Status beginBackup(OperationContext* opCtx) {
return Status(ErrorCodes::CommandNotSupported,
"The current storage engine doesn't support backup mode");
}
@@ -240,7 +240,7 @@ public:
*
* Storage engines implementing this feature should fassert when unable to leave backup mode.
*/
- virtual void endBackup(OperationContext* txn) {
+ virtual void endBackup(OperationContext* opCtx) {
return;
}
@@ -253,7 +253,7 @@ public:
*
* NOTE: MMAPv1 does not support this method and has its own repairDatabase() method.
*/
- virtual Status repairRecordStore(OperationContext* txn, const std::string& ns) = 0;
+ virtual Status repairRecordStore(OperationContext* opCtx, const std::string& ns) = 0;
/**
* This method will be called before there is a clean shutdown. Storage engines should
diff --git a/src/mongo/db/storage/storage_init.cpp b/src/mongo/db/storage/storage_init.cpp
index 0af1b78a602..dc21864d5b5 100644
--- a/src/mongo/db/storage/storage_init.cpp
+++ b/src/mongo/db/storage/storage_init.cpp
@@ -52,8 +52,9 @@ public:
return true;
}
- virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
- auto engine = txn->getClient()->getServiceContext()->getGlobalStorageEngine();
+ virtual BSONObj generateSection(OperationContext* opCtx,
+ const BSONElement& configElement) const {
+ auto engine = opCtx->getClient()->getServiceContext()->getGlobalStorageEngine();
return BSON("name" << storageGlobalParams.engine << "supportsCommittedReads"
<< bool(engine->getSnapshotManager())
<< "readOnly"
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
index 5daafcbd8ea..5e70584a86d 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
@@ -209,11 +209,11 @@ StatusWith<std::string> WiredTigerIndex::generateCreateString(const std::string&
return StatusWith<std::string>(ss);
}
-int WiredTigerIndex::Create(OperationContext* txn,
+int WiredTigerIndex::Create(OperationContext* opCtx,
const std::string& uri,
const std::string& config) {
// Don't use the session from the recovery unit: create should not be used in a transaction
- WiredTigerSession session(WiredTigerRecoveryUnit::get(txn)->getSessionCache()->conn());
+ WiredTigerSession session(WiredTigerRecoveryUnit::get(opCtx)->getSessionCache()->conn());
WT_SESSION* s = session.getSession();
LOG(1) << "create uri: " << uri << " config: " << config;
return s->create(s, uri.c_str(), config.c_str());
@@ -244,7 +244,7 @@ WiredTigerIndex::WiredTigerIndex(OperationContext* ctx,
version.getValue() == kKeyStringV1Version ? KeyString::Version::V1 : KeyString::Version::V0;
}
-Status WiredTigerIndex::insert(OperationContext* txn,
+Status WiredTigerIndex::insert(OperationContext* opCtx,
const BSONObj& key,
const RecordId& id,
bool dupsAllowed) {
@@ -255,21 +255,21 @@ Status WiredTigerIndex::insert(OperationContext* txn,
if (!s.isOK())
return s;
- WiredTigerCursor curwrap(_uri, _tableId, false, txn);
+ WiredTigerCursor curwrap(_uri, _tableId, false, opCtx);
curwrap.assertInActiveTxn();
WT_CURSOR* c = curwrap.get();
return _insert(c, key, id, dupsAllowed);
}
-void WiredTigerIndex::unindex(OperationContext* txn,
+void WiredTigerIndex::unindex(OperationContext* opCtx,
const BSONObj& key,
const RecordId& id,
bool dupsAllowed) {
invariant(id.isNormal());
dassert(!hasFieldNames(key));
- WiredTigerCursor curwrap(_uri, _tableId, false, txn);
+ WiredTigerCursor curwrap(_uri, _tableId, false, opCtx);
curwrap.assertInActiveTxn();
WT_CURSOR* c = curwrap.get();
invariant(c);
@@ -277,11 +277,11 @@ void WiredTigerIndex::unindex(OperationContext* txn,
_unindex(c, key, id, dupsAllowed);
}
-void WiredTigerIndex::fullValidate(OperationContext* txn,
+void WiredTigerIndex::fullValidate(OperationContext* opCtx,
long long* numKeysOut,
ValidateResults* fullResults) const {
- if (fullResults && !WiredTigerRecoveryUnit::get(txn)->getSessionCache()->isEphemeral()) {
- int err = WiredTigerUtil::verifyTable(txn, _uri, &(fullResults->errors));
+ if (fullResults && !WiredTigerRecoveryUnit::get(opCtx)->getSessionCache()->isEphemeral()) {
+ int err = WiredTigerUtil::verifyTable(opCtx, _uri, &(fullResults->errors));
if (err == EBUSY) {
const char* msg = "verify() returned EBUSY. Not treating as invalid.";
warning() << msg;
@@ -298,7 +298,7 @@ void WiredTigerIndex::fullValidate(OperationContext* txn,
}
}
- auto cursor = newCursor(txn);
+ auto cursor = newCursor(opCtx);
long long count = 0;
TRACE_INDEX << " fullValidate";
@@ -312,12 +312,12 @@ void WiredTigerIndex::fullValidate(OperationContext* txn,
}
}
-bool WiredTigerIndex::appendCustomStats(OperationContext* txn,
+bool WiredTigerIndex::appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* output,
double scale) const {
{
BSONObjBuilder metadata(output->subobjStart("metadata"));
- Status status = WiredTigerUtil::getApplicationMetadata(txn, uri(), &metadata);
+ Status status = WiredTigerUtil::getApplicationMetadata(opCtx, uri(), &metadata);
if (!status.isOK()) {
metadata.append("error", "unable to retrieve metadata");
metadata.append("code", static_cast<int>(status.code()));
@@ -325,8 +325,8 @@ bool WiredTigerIndex::appendCustomStats(OperationContext* txn,
}
}
std::string type, sourceURI;
- WiredTigerUtil::fetchTypeAndSourceURI(txn, _uri, &type, &sourceURI);
- StatusWith<std::string> metadataResult = WiredTigerUtil::getMetadata(txn, sourceURI);
+ WiredTigerUtil::fetchTypeAndSourceURI(opCtx, _uri, &type, &sourceURI);
+ StatusWith<std::string> metadataResult = WiredTigerUtil::getMetadata(opCtx, sourceURI);
StringData creationStringName("creationString");
if (!metadataResult.isOK()) {
BSONObjBuilder creationString(output->subobjStart(creationStringName));
@@ -339,7 +339,7 @@ bool WiredTigerIndex::appendCustomStats(OperationContext* txn,
output->append("type", type);
}
- WiredTigerSession* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn);
+ WiredTigerSession* session = WiredTigerRecoveryUnit::get(opCtx)->getSession(opCtx);
WT_SESSION* s = session->getSession();
Status status =
WiredTigerUtil::exportTableToBSON(s, "statistics:" + uri(), "statistics=(fast)", output);
@@ -351,11 +351,13 @@ bool WiredTigerIndex::appendCustomStats(OperationContext* txn,
return true;
}
-Status WiredTigerIndex::dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& id) {
+Status WiredTigerIndex::dupKeyCheck(OperationContext* opCtx,
+ const BSONObj& key,
+ const RecordId& id) {
invariant(!hasFieldNames(key));
invariant(unique());
- WiredTigerCursor curwrap(_uri, _tableId, false, txn);
+ WiredTigerCursor curwrap(_uri, _tableId, false, opCtx);
WT_CURSOR* c = curwrap.get();
if (isDup(c, key, id))
@@ -363,8 +365,8 @@ Status WiredTigerIndex::dupKeyCheck(OperationContext* txn, const BSONObj& key, c
return Status::OK();
}
-bool WiredTigerIndex::isEmpty(OperationContext* txn) {
- WiredTigerCursor curwrap(_uri, _tableId, false, txn);
+bool WiredTigerIndex::isEmpty(OperationContext* opCtx) {
+ WiredTigerCursor curwrap(_uri, _tableId, false, opCtx);
WT_CURSOR* c = curwrap.get();
if (!c)
return true;
@@ -375,8 +377,8 @@ bool WiredTigerIndex::isEmpty(OperationContext* txn) {
return false;
}
-Status WiredTigerIndex::touch(OperationContext* txn) const {
- if (WiredTigerRecoveryUnit::get(txn)->getSessionCache()->isEphemeral()) {
+Status WiredTigerIndex::touch(OperationContext* opCtx) const {
+ if (WiredTigerRecoveryUnit::get(opCtx)->getSessionCache()->isEphemeral()) {
// Everything is already in memory.
return Status::OK();
}
@@ -384,9 +386,9 @@ Status WiredTigerIndex::touch(OperationContext* txn) const {
}
-long long WiredTigerIndex::getSpaceUsedBytes(OperationContext* txn) const {
- auto ru = WiredTigerRecoveryUnit::get(txn);
- WiredTigerSession* session = ru->getSession(txn);
+long long WiredTigerIndex::getSpaceUsedBytes(OperationContext* opCtx) const {
+ auto ru = WiredTigerRecoveryUnit::get(opCtx);
+ WiredTigerSession* session = ru->getSession(opCtx);
if (ru->getSessionCache()->isEphemeral()) {
// For ephemeral case, use cursor statistics
@@ -449,13 +451,13 @@ bool WiredTigerIndex::isDup(WT_CURSOR* c, const BSONObj& key, const RecordId& id
return true;
}
-Status WiredTigerIndex::initAsEmpty(OperationContext* txn) {
+Status WiredTigerIndex::initAsEmpty(OperationContext* opCtx) {
// No-op
return Status::OK();
}
-Status WiredTigerIndex::compact(OperationContext* txn) {
- WiredTigerSessionCache* cache = WiredTigerRecoveryUnit::get(txn)->getSessionCache();
+Status WiredTigerIndex::compact(OperationContext* opCtx) {
+ WiredTigerSessionCache* cache = WiredTigerRecoveryUnit::get(opCtx)->getSessionCache();
if (!cache->isEphemeral()) {
UniqueWiredTigerSession session = cache->getSession();
WT_SESSION* s = session->getSession();
@@ -472,10 +474,10 @@ Status WiredTigerIndex::compact(OperationContext* txn) {
*/
class WiredTigerIndex::BulkBuilder : public SortedDataBuilderInterface {
public:
- BulkBuilder(WiredTigerIndex* idx, OperationContext* txn)
+ BulkBuilder(WiredTigerIndex* idx, OperationContext* opCtx)
: _ordering(idx->_ordering),
- _txn(txn),
- _session(WiredTigerRecoveryUnit::get(_txn)->getSessionCache()->getSession()),
+ _opCtx(opCtx),
+ _session(WiredTigerRecoveryUnit::get(_opCtx)->getSessionCache()->getSession()),
_cursor(openBulkCursor(idx)) {}
~BulkBuilder() {
@@ -486,7 +488,7 @@ protected:
WT_CURSOR* openBulkCursor(WiredTigerIndex* idx) {
// Open cursors can cause bulk open_cursor to fail with EBUSY.
// TODO any other cases that could cause EBUSY?
- WiredTigerSession* outerSession = WiredTigerRecoveryUnit::get(_txn)->getSession(_txn);
+ WiredTigerSession* outerSession = WiredTigerRecoveryUnit::get(_opCtx)->getSession(_opCtx);
outerSession->closeAllCursors();
// Not using cursor cache since we need to set "bulk".
@@ -509,7 +511,7 @@ protected:
}
const Ordering _ordering;
- OperationContext* const _txn;
+ OperationContext* const _opCtx;
UniqueWiredTigerSession const _session;
WT_CURSOR* const _cursor;
};
@@ -519,8 +521,8 @@ protected:
*/
class WiredTigerIndex::StandardBulkBuilder : public BulkBuilder {
public:
- StandardBulkBuilder(WiredTigerIndex* idx, OperationContext* txn)
- : BulkBuilder(idx, txn), _idx(idx) {}
+ StandardBulkBuilder(WiredTigerIndex* idx, OperationContext* opCtx)
+ : BulkBuilder(idx, opCtx), _idx(idx) {}
Status addKey(const BSONObj& key, const RecordId& id) {
{
@@ -549,7 +551,7 @@ public:
void commit(bool mayInterrupt) {
// TODO do we still need this?
// this is bizarre, but required as part of the contract
- WriteUnitOfWork uow(_txn);
+ WriteUnitOfWork uow(_opCtx);
uow.commit();
}
@@ -567,8 +569,8 @@ private:
*/
class WiredTigerIndex::UniqueBulkBuilder : public BulkBuilder {
public:
- UniqueBulkBuilder(WiredTigerIndex* idx, OperationContext* txn, bool dupsAllowed)
- : BulkBuilder(idx, txn),
+ UniqueBulkBuilder(WiredTigerIndex* idx, OperationContext* opCtx, bool dupsAllowed)
+ : BulkBuilder(idx, opCtx),
_idx(idx),
_dupsAllowed(dupsAllowed),
_keyString(idx->keyStringVersion()) {}
@@ -607,7 +609,7 @@ public:
}
void commit(bool mayInterrupt) {
- WriteUnitOfWork uow(_txn);
+ WriteUnitOfWork uow(_opCtx);
if (!_records.empty()) {
// This handles inserting the last unique key.
doInsert();
@@ -654,14 +656,14 @@ namespace {
*/
class WiredTigerIndexCursorBase : public SortedDataInterface::Cursor {
public:
- WiredTigerIndexCursorBase(const WiredTigerIndex& idx, OperationContext* txn, bool forward)
- : _txn(txn),
+ WiredTigerIndexCursorBase(const WiredTigerIndex& idx, OperationContext* opCtx, bool forward)
+ : _opCtx(opCtx),
_idx(idx),
_forward(forward),
_key(idx.keyStringVersion()),
_typeBits(idx.keyStringVersion()),
_query(idx.keyStringVersion()) {
- _cursor.emplace(_idx.uri(), _idx.tableId(), false, _txn);
+ _cursor.emplace(_idx.uri(), _idx.tableId(), false, _opCtx);
}
boost::optional<IndexKeyEntry> next(RequestedInfo parts) override {
// Advance on a cursor at the end is a no-op
@@ -739,11 +741,11 @@ public:
void restore() override {
if (!_cursor) {
- _cursor.emplace(_idx.uri(), _idx.tableId(), false, _txn);
+ _cursor.emplace(_idx.uri(), _idx.tableId(), false, _opCtx);
}
// Ensure an active session exists, so any restored cursors will bind to it
- invariant(WiredTigerRecoveryUnit::get(_txn)->getSession(_txn) == _cursor->getSession());
+ invariant(WiredTigerRecoveryUnit::get(_opCtx)->getSession(_opCtx) == _cursor->getSession());
if (!_eof) {
// Unique indices *don't* include the record id in their KeyStrings. If we seek to the
@@ -760,12 +762,12 @@ public:
}
void detachFromOperationContext() final {
- _txn = nullptr;
+ _opCtx = nullptr;
_cursor = boost::none;
}
- void reattachToOperationContext(OperationContext* txn) final {
- _txn = txn;
+ void reattachToOperationContext(OperationContext* opCtx) final {
+ _opCtx = opCtx;
// _cursor recreated in restore() to avoid risk of WT_ROLLBACK issues.
}
@@ -910,7 +912,7 @@ protected:
updateIdAndTypeBits();
}
- OperationContext* _txn;
+ OperationContext* _opCtx;
boost::optional<WiredTigerCursor> _cursor;
const WiredTigerIndex& _idx; // not owned
const bool _forward;
@@ -937,8 +939,8 @@ protected:
class WiredTigerIndexStandardCursor final : public WiredTigerIndexCursorBase {
public:
- WiredTigerIndexStandardCursor(const WiredTigerIndex& idx, OperationContext* txn, bool forward)
- : WiredTigerIndexCursorBase(idx, txn, forward) {}
+ WiredTigerIndexStandardCursor(const WiredTigerIndex& idx, OperationContext* opCtx, bool forward)
+ : WiredTigerIndexCursorBase(idx, opCtx, forward) {}
void updateIdAndTypeBits() override {
_id = KeyString::decodeRecordIdAtEnd(_key.getBuffer(), _key.getSize());
@@ -953,8 +955,8 @@ public:
class WiredTigerIndexUniqueCursor final : public WiredTigerIndexCursorBase {
public:
- WiredTigerIndexUniqueCursor(const WiredTigerIndex& idx, OperationContext* txn, bool forward)
- : WiredTigerIndexCursorBase(idx, txn, forward) {}
+ WiredTigerIndexUniqueCursor(const WiredTigerIndex& idx, OperationContext* opCtx, bool forward)
+ : WiredTigerIndexCursorBase(idx, opCtx, forward) {}
void updateIdAndTypeBits() override {
// We assume that cursors can only ever see unique indexes in their "pristine" state,
@@ -1000,14 +1002,14 @@ WiredTigerIndexUnique::WiredTigerIndexUnique(OperationContext* ctx,
const IndexDescriptor* desc)
: WiredTigerIndex(ctx, uri, desc) {}
-std::unique_ptr<SortedDataInterface::Cursor> WiredTigerIndexUnique::newCursor(OperationContext* txn,
- bool forward) const {
- return stdx::make_unique<WiredTigerIndexUniqueCursor>(*this, txn, forward);
+std::unique_ptr<SortedDataInterface::Cursor> WiredTigerIndexUnique::newCursor(
+ OperationContext* opCtx, bool forward) const {
+ return stdx::make_unique<WiredTigerIndexUniqueCursor>(*this, opCtx, forward);
}
-SortedDataBuilderInterface* WiredTigerIndexUnique::getBulkBuilder(OperationContext* txn,
+SortedDataBuilderInterface* WiredTigerIndexUnique::getBulkBuilder(OperationContext* opCtx,
bool dupsAllowed) {
- return new UniqueBulkBuilder(this, txn, dupsAllowed);
+ return new UniqueBulkBuilder(this, opCtx, dupsAllowed);
}
Status WiredTigerIndexUnique::_insert(WT_CURSOR* c,
@@ -1164,15 +1166,15 @@ WiredTigerIndexStandard::WiredTigerIndexStandard(OperationContext* ctx,
: WiredTigerIndex(ctx, uri, desc) {}
std::unique_ptr<SortedDataInterface::Cursor> WiredTigerIndexStandard::newCursor(
- OperationContext* txn, bool forward) const {
- return stdx::make_unique<WiredTigerIndexStandardCursor>(*this, txn, forward);
+ OperationContext* opCtx, bool forward) const {
+ return stdx::make_unique<WiredTigerIndexStandardCursor>(*this, opCtx, forward);
}
-SortedDataBuilderInterface* WiredTigerIndexStandard::getBulkBuilder(OperationContext* txn,
+SortedDataBuilderInterface* WiredTigerIndexStandard::getBulkBuilder(OperationContext* opCtx,
bool dupsAllowed) {
// We aren't unique so dups better be allowed.
invariant(dupsAllowed);
- return new StandardBulkBuilder(this, txn);
+ return new StandardBulkBuilder(this, opCtx);
}
Status WiredTigerIndexStandard::_insert(WT_CURSOR* c,
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
index 6a2d49c7002..20485fa8f9d 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
@@ -72,37 +72,37 @@ public:
* Creates a WiredTiger table suitable for implementing a MongoDB index.
* 'config' should be created with generateCreateString().
*/
- static int Create(OperationContext* txn, const std::string& uri, const std::string& config);
+ static int Create(OperationContext* opCtx, const std::string& uri, const std::string& config);
WiredTigerIndex(OperationContext* ctx, const std::string& uri, const IndexDescriptor* desc);
- virtual Status insert(OperationContext* txn,
+ virtual Status insert(OperationContext* opCtx,
const BSONObj& key,
const RecordId& id,
bool dupsAllowed);
- virtual void unindex(OperationContext* txn,
+ virtual void unindex(OperationContext* opCtx,
const BSONObj& key,
const RecordId& id,
bool dupsAllowed);
- virtual void fullValidate(OperationContext* txn,
+ virtual void fullValidate(OperationContext* opCtx,
long long* numKeysOut,
ValidateResults* fullResults) const;
- virtual bool appendCustomStats(OperationContext* txn,
+ virtual bool appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* output,
double scale) const;
- virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& id);
+ virtual Status dupKeyCheck(OperationContext* opCtx, const BSONObj& key, const RecordId& id);
- virtual bool isEmpty(OperationContext* txn);
+ virtual bool isEmpty(OperationContext* opCtx);
- virtual Status touch(OperationContext* txn) const;
+ virtual Status touch(OperationContext* opCtx) const;
- virtual long long getSpaceUsedBytes(OperationContext* txn) const;
+ virtual long long getSpaceUsedBytes(OperationContext* opCtx) const;
- virtual Status initAsEmpty(OperationContext* txn);
+ virtual Status initAsEmpty(OperationContext* opCtx);
- virtual Status compact(OperationContext* txn);
+ virtual Status compact(OperationContext* opCtx);
const std::string& uri() const {
return _uri;
@@ -162,10 +162,10 @@ public:
const std::string& uri,
const IndexDescriptor* desc);
- std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* txn,
+ std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* opCtx,
bool forward) const override;
- SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn, bool dupsAllowed) override;
+ SortedDataBuilderInterface* getBulkBuilder(OperationContext* opCtx, bool dupsAllowed) override;
bool unique() const override {
return true;
@@ -182,10 +182,10 @@ public:
const std::string& uri,
const IndexDescriptor* desc);
- std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* txn,
+ std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* opCtx,
bool forward) const override;
- SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn, bool dupsAllowed) override;
+ SortedDataBuilderInterface* getBulkBuilder(OperationContext* opCtx, bool dupsAllowed) override;
bool unique() const override {
return false;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp
index 47c1c81d352..eceeb4e28d7 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp
@@ -70,7 +70,7 @@ public:
std::unique_ptr<SortedDataInterface> newSortedDataInterface(bool unique) final {
std::string ns = "test.wt";
- OperationContextNoop txn(newRecoveryUnit().release());
+ OperationContextNoop opCtx(newRecoveryUnit().release());
BSONObj spec = BSON("key" << BSON("a" << 1) << "name"
<< "testIndex"
@@ -84,11 +84,11 @@ public:
ASSERT_OK(result.getStatus());
string uri = "table:" + ns;
- invariantWTOK(WiredTigerIndex::Create(&txn, uri, result.getValue()));
+ invariantWTOK(WiredTigerIndex::Create(&opCtx, uri, result.getValue()));
if (unique)
- return stdx::make_unique<WiredTigerIndexUnique>(&txn, uri, &desc);
- return stdx::make_unique<WiredTigerIndexStandard>(&txn, uri, &desc);
+ return stdx::make_unique<WiredTigerIndexUnique>(&opCtx, uri, &desc);
+ return stdx::make_unique<WiredTigerIndexStandard>(&opCtx, uri, &desc);
}
std::unique_ptr<RecoveryUnit> newRecoveryUnit() final {
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 682fd842aaf..060f4b58288 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -137,7 +137,7 @@ public:
TicketServerParameter(TicketHolder* holder, const std::string& name)
: ServerParameter(ServerParameterSet::getGlobal(), name, true, true), _holder(holder) {}
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
+ virtual void append(OperationContext* opCtx, BSONObjBuilder& b, const std::string& name) {
b.append(name, _holder->outof());
}
@@ -401,7 +401,7 @@ Status WiredTigerKVEngine::_salvageIfNeeded(const char* uri) {
return wtRCToStatus(session->salvage(session, uri, NULL), "Salvage failed:");
}
-int WiredTigerKVEngine::flushAllFiles(OperationContext* txn, bool sync) {
+int WiredTigerKVEngine::flushAllFiles(OperationContext* opCtx, bool sync) {
LOG(1) << "WiredTigerKVEngine::flushAllFiles";
if (_ephemeral) {
return 0;
@@ -412,7 +412,7 @@ int WiredTigerKVEngine::flushAllFiles(OperationContext* txn, bool sync) {
return 1;
}
-Status WiredTigerKVEngine::beginBackup(OperationContext* txn) {
+Status WiredTigerKVEngine::beginBackup(OperationContext* opCtx) {
invariant(!_backupSession);
// This cursor will be freed by the backupSession being closed as the session is uncached
@@ -427,7 +427,7 @@ Status WiredTigerKVEngine::beginBackup(OperationContext* txn) {
return Status::OK();
}
-void WiredTigerKVEngine::endBackup(OperationContext* txn) {
+void WiredTigerKVEngine::endBackup(OperationContext* opCtx) {
_backupSession.reset();
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
index 8f632ef537c..1386eb7808d 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
@@ -108,11 +108,11 @@ public:
StringData ident,
const RecordStore* originalRecordStore) const;
- virtual int flushAllFiles(OperationContext* txn, bool sync);
+ virtual int flushAllFiles(OperationContext* opCtx, bool sync);
- virtual Status beginBackup(OperationContext* txn);
+ virtual Status beginBackup(OperationContext* opCtx);
- virtual void endBackup(OperationContext* txn);
+ virtual void endBackup(OperationContext* opCtx);
virtual int64_t getIdentSize(OperationContext* opCtx, StringData ident);
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp
index 80b2344969c..d5a74f184ce 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp
@@ -46,7 +46,7 @@ WiredTigerEngineRuntimeConfigParameter::WiredTigerEngineRuntimeConfigParameter(
_engine(engine) {}
-void WiredTigerEngineRuntimeConfigParameter::append(OperationContext* txn,
+void WiredTigerEngineRuntimeConfigParameter::append(OperationContext* opCtx,
BSONObjBuilder& b,
const std::string& name) {
b << name << "";
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.h b/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.h
index 6742f76be99..9bc4699794f 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.h
@@ -41,7 +41,7 @@ class WiredTigerEngineRuntimeConfigParameter : public ServerParameter {
public:
explicit WiredTigerEngineRuntimeConfigParameter(WiredTigerKVEngine* engine);
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name);
+ virtual void append(OperationContext* opCtx, BSONObjBuilder& b, const std::string& name);
virtual Status set(const BSONElement& newValueElement);
virtual Status setFromString(const std::string& str);
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index 6a33378a070..a88754cf992 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -143,7 +143,7 @@ private:
OplogStones* _oplogStones;
};
-WiredTigerRecordStore::OplogStones::OplogStones(OperationContext* txn, WiredTigerRecordStore* rs)
+WiredTigerRecordStore::OplogStones::OplogStones(OperationContext* opCtx, WiredTigerRecordStore* rs)
: _rs(rs) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
@@ -159,7 +159,7 @@ WiredTigerRecordStore::OplogStones::OplogStones(OperationContext* txn, WiredTige
_minBytesPerStone = maxSize / _numStonesToKeep;
invariant(_minBytesPerStone > 0);
- _calculateStones(txn);
+ _calculateStones(opCtx);
_pokeReclaimThreadIfNeeded(); // Reclaim stones if over the limit.
}
@@ -227,13 +227,16 @@ void WiredTigerRecordStore::OplogStones::createNewStoneIfNeeded(RecordId lastRec
}
void WiredTigerRecordStore::OplogStones::updateCurrentStoneAfterInsertOnCommit(
- OperationContext* txn, int64_t bytesInserted, RecordId highestInserted, int64_t countInserted) {
- txn->recoveryUnit()->registerChange(
+ OperationContext* opCtx,
+ int64_t bytesInserted,
+ RecordId highestInserted,
+ int64_t countInserted) {
+ opCtx->recoveryUnit()->registerChange(
new InsertChange(this, bytesInserted, highestInserted, countInserted));
}
-void WiredTigerRecordStore::OplogStones::clearStonesOnCommit(OperationContext* txn) {
- txn->recoveryUnit()->registerChange(new TruncateChange(this));
+void WiredTigerRecordStore::OplogStones::clearStonesOnCommit(OperationContext* opCtx) {
+ opCtx->recoveryUnit()->registerChange(new TruncateChange(this));
}
void WiredTigerRecordStore::OplogStones::updateStonesAfterCappedTruncateAfter(
@@ -285,9 +288,9 @@ void WiredTigerRecordStore::OplogStones::setNumStonesToKeep(size_t numStones) {
_numStonesToKeep = numStones;
}
-void WiredTigerRecordStore::OplogStones::_calculateStones(OperationContext* txn) {
- long long numRecords = _rs->numRecords(txn);
- long long dataSize = _rs->dataSize(txn);
+void WiredTigerRecordStore::OplogStones::_calculateStones(OperationContext* opCtx) {
+ long long numRecords = _rs->numRecords(opCtx);
+ long long dataSize = _rs->dataSize(opCtx);
log() << "The size storer reports that the oplog contains " << numRecords
<< " records totaling to " << dataSize << " bytes";
@@ -301,7 +304,7 @@ void WiredTigerRecordStore::OplogStones::_calculateStones(OperationContext* txn)
if (numRecords <= 0 || dataSize <= 0 ||
uint64_t(numRecords) <
kMinSampleRatioForRandCursor * kRandomSamplesPerStone * _numStonesToKeep) {
- _calculateStonesByScanning(txn);
+ _calculateStonesByScanning(opCtx);
return;
}
@@ -311,16 +314,16 @@ void WiredTigerRecordStore::OplogStones::_calculateStones(OperationContext* txn)
double estRecordsPerStone = std::ceil(_minBytesPerStone / avgRecordSize);
double estBytesPerStone = estRecordsPerStone * avgRecordSize;
- _calculateStonesBySampling(txn, int64_t(estRecordsPerStone), int64_t(estBytesPerStone));
+ _calculateStonesBySampling(opCtx, int64_t(estRecordsPerStone), int64_t(estBytesPerStone));
}
-void WiredTigerRecordStore::OplogStones::_calculateStonesByScanning(OperationContext* txn) {
+void WiredTigerRecordStore::OplogStones::_calculateStonesByScanning(OperationContext* opCtx) {
log() << "Scanning the oplog to determine where to place markers for truncation";
long long numRecords = 0;
long long dataSize = 0;
- auto cursor = _rs->getCursor(txn, true);
+ auto cursor = _rs->getCursor(opCtx, true);
while (auto record = cursor->next()) {
_currentRecords.addAndFetch(1);
int64_t newCurrentBytes = _currentBytes.addAndFetch(record->data.size());
@@ -336,10 +339,10 @@ void WiredTigerRecordStore::OplogStones::_calculateStonesByScanning(OperationCon
dataSize += record->data.size();
}
- _rs->updateStatsAfterRepair(txn, numRecords, dataSize);
+ _rs->updateStatsAfterRepair(opCtx, numRecords, dataSize);
}
-void WiredTigerRecordStore::OplogStones::_calculateStonesBySampling(OperationContext* txn,
+void WiredTigerRecordStore::OplogStones::_calculateStonesBySampling(OperationContext* opCtx,
int64_t estRecordsPerStone,
int64_t estBytesPerStone) {
Timestamp earliestOpTime;
@@ -347,13 +350,13 @@ void WiredTigerRecordStore::OplogStones::_calculateStonesBySampling(OperationCon
{
const bool forward = true;
- auto cursor = _rs->getCursor(txn, forward);
+ auto cursor = _rs->getCursor(opCtx, forward);
auto record = cursor->next();
if (!record) {
// This shouldn't really happen unless the size storer values are far off from reality.
// The collection is probably empty, but fall back to scanning the oplog just in case.
log() << "Failed to determine the earliest optime, falling back to scanning the oplog";
- _calculateStonesByScanning(txn);
+ _calculateStonesByScanning(opCtx);
return;
}
earliestOpTime = Timestamp(record->id.repr());
@@ -361,13 +364,13 @@ void WiredTigerRecordStore::OplogStones::_calculateStonesBySampling(OperationCon
{
const bool forward = false;
- auto cursor = _rs->getCursor(txn, forward);
+ auto cursor = _rs->getCursor(opCtx, forward);
auto record = cursor->next();
if (!record) {
// This shouldn't really happen unless the size storer values are far off from reality.
// The collection is probably empty, but fall back to scanning the oplog just in case.
log() << "Failed to determine the latest optime, falling back to scanning the oplog";
- _calculateStonesByScanning(txn);
+ _calculateStonesByScanning(opCtx);
return;
}
latestOpTime = Timestamp(record->id.repr());
@@ -376,8 +379,8 @@ void WiredTigerRecordStore::OplogStones::_calculateStonesBySampling(OperationCon
log() << "Sampling from the oplog between " << earliestOpTime.toStringPretty() << " and "
<< latestOpTime.toStringPretty() << " to determine where to place markers for truncation";
- int64_t wholeStones = _rs->numRecords(txn) / estRecordsPerStone;
- int64_t numSamples = kRandomSamplesPerStone * _rs->numRecords(txn) / estRecordsPerStone;
+ int64_t wholeStones = _rs->numRecords(opCtx) / estRecordsPerStone;
+ int64_t numSamples = kRandomSamplesPerStone * _rs->numRecords(opCtx) / estRecordsPerStone;
log() << "Taking " << numSamples << " samples and assuming that each section of oplog contains"
<< " approximately " << estRecordsPerStone << " records totaling to " << estBytesPerStone
@@ -391,7 +394,7 @@ void WiredTigerRecordStore::OplogStones::_calculateStonesBySampling(OperationCon
// approximately 'estRecordsPerStone'. Do so by oversampling the oplog, sorting the samples in
// order of their RecordId, and then choosing the samples expected to be near the right edge of
// each logical section.
- auto cursor = _rs->getRandomCursorWithOptions(txn, extraConfig);
+ auto cursor = _rs->getRandomCursorWithOptions(opCtx, extraConfig);
std::vector<RecordId> oplogEstimates;
for (int i = 0; i < numSamples; ++i) {
auto record = cursor->next();
@@ -399,7 +402,7 @@ void WiredTigerRecordStore::OplogStones::_calculateStonesBySampling(OperationCon
// This shouldn't really happen unless the size storer values are far off from reality.
// The collection is probably empty, but fall back to scanning the oplog just in case.
log() << "Failed to get enough random samples, falling back to scanning the oplog";
- _calculateStonesByScanning(txn);
+ _calculateStonesByScanning(opCtx);
return;
}
oplogEstimates.push_back(record->id);
@@ -418,8 +421,8 @@ void WiredTigerRecordStore::OplogStones::_calculateStonesBySampling(OperationCon
}
// Account for the partially filled chunk.
- _currentRecords.store(_rs->numRecords(txn) - estRecordsPerStone * wholeStones);
- _currentBytes.store(_rs->dataSize(txn) - estBytesPerStone * wholeStones);
+ _currentRecords.store(_rs->numRecords(opCtx) - estRecordsPerStone * wholeStones);
+ _currentBytes.store(_rs->dataSize(opCtx) - estBytesPerStone * wholeStones);
}
void WiredTigerRecordStore::OplogStones::_pokeReclaimThreadIfNeeded() {
@@ -430,12 +433,12 @@ void WiredTigerRecordStore::OplogStones::_pokeReclaimThreadIfNeeded() {
class WiredTigerRecordStore::Cursor final : public SeekableRecordCursor {
public:
- Cursor(OperationContext* txn, const WiredTigerRecordStore& rs, bool forward = true)
+ Cursor(OperationContext* opCtx, const WiredTigerRecordStore& rs, bool forward = true)
: _rs(rs),
- _txn(txn),
+ _opCtx(opCtx),
_forward(forward),
- _readUntilForOplog(WiredTigerRecoveryUnit::get(txn)->getOplogReadTill()) {
- _cursor.emplace(rs.getURI(), rs.tableId(), true, txn);
+ _readUntilForOplog(WiredTigerRecoveryUnit::get(opCtx)->getOplogReadTill()) {
+ _cursor.emplace(rs.getURI(), rs.tableId(), true, opCtx);
}
boost::optional<Record> next() final {
@@ -519,10 +522,10 @@ public:
bool restore() final {
if (!_cursor)
- _cursor.emplace(_rs.getURI(), _rs.tableId(), true, _txn);
+ _cursor.emplace(_rs.getURI(), _rs.tableId(), true, _opCtx);
// This will ensure an active session exists, so any restored cursors will bind to it
- invariant(WiredTigerRecoveryUnit::get(_txn)->getSession(_txn) == _cursor->getSession());
+ invariant(WiredTigerRecoveryUnit::get(_opCtx)->getSession(_opCtx) == _cursor->getSession());
_skipNextAdvance = false;
// If we've hit EOF, then this iterator is done and need not be restored.
@@ -566,12 +569,12 @@ public:
}
void detachFromOperationContext() final {
- _txn = nullptr;
+ _opCtx = nullptr;
_cursor = boost::none;
}
- void reattachToOperationContext(OperationContext* txn) final {
- _txn = txn;
+ void reattachToOperationContext(OperationContext* opCtx) final {
+ _opCtx = opCtx;
// _cursor recreated in restore() to avoid risk of WT_ROLLBACK issues.
}
@@ -598,7 +601,7 @@ private:
}
const WiredTigerRecordStore& _rs;
- OperationContext* _txn;
+ OperationContext* _opCtx;
const bool _forward;
bool _skipNextAdvance = false;
boost::optional<WiredTigerCursor> _cursor;
@@ -629,8 +632,8 @@ StatusWith<std::string> WiredTigerRecordStore::parseOptionsField(const BSONObj o
class WiredTigerRecordStore::RandomCursor final : public RecordCursor {
public:
- RandomCursor(OperationContext* txn, const WiredTigerRecordStore& rs, StringData config)
- : _cursor(nullptr), _rs(&rs), _txn(txn), _config(config.toString() + ",next_random") {
+ RandomCursor(OperationContext* opCtx, const WiredTigerRecordStore& rs, StringData config)
+ : _cursor(nullptr), _rs(&rs), _opCtx(opCtx), _config(config.toString() + ",next_random") {
restore();
}
@@ -668,7 +671,7 @@ public:
bool restore() final {
// We can't use the CursorCache since this cursor needs a special config string.
- WT_SESSION* session = WiredTigerRecoveryUnit::get(_txn)->getSession(_txn)->getSession();
+ WT_SESSION* session = WiredTigerRecoveryUnit::get(_opCtx)->getSession(_opCtx)->getSession();
if (!_cursor) {
invariantWTOK(session->open_cursor(
@@ -678,22 +681,22 @@ public:
return true;
}
void detachFromOperationContext() final {
- invariant(_txn);
- _txn = nullptr;
+ invariant(_opCtx);
+ _opCtx = nullptr;
if (_cursor) {
invariantWTOK(_cursor->close(_cursor));
}
_cursor = nullptr;
}
- void reattachToOperationContext(OperationContext* txn) final {
- invariant(!_txn);
- _txn = txn;
+ void reattachToOperationContext(OperationContext* opCtx) final {
+ invariant(!_opCtx);
+ _opCtx = opCtx;
}
private:
WT_CURSOR* _cursor;
const WiredTigerRecordStore* _rs;
- OperationContext* _txn;
+ OperationContext* _opCtx;
const std::string _config;
};
@@ -878,11 +881,11 @@ bool WiredTigerRecordStore::inShutdown() const {
return _shuttingDown;
}
-long long WiredTigerRecordStore::dataSize(OperationContext* txn) const {
+long long WiredTigerRecordStore::dataSize(OperationContext* opCtx) const {
return _dataSize.load();
}
-long long WiredTigerRecordStore::numRecords(OperationContext* txn) const {
+long long WiredTigerRecordStore::numRecords(OperationContext* opCtx) const {
return _numRecords.load();
}
@@ -900,13 +903,13 @@ int64_t WiredTigerRecordStore::cappedMaxSize() const {
return _cappedMaxSize;
}
-int64_t WiredTigerRecordStore::storageSize(OperationContext* txn,
+int64_t WiredTigerRecordStore::storageSize(OperationContext* opCtx,
BSONObjBuilder* extraInfo,
int infoLevel) const {
if (_isEphemeral) {
- return dataSize(txn);
+ return dataSize(opCtx);
}
- WiredTigerSession* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn);
+ WiredTigerSession* session = WiredTigerRecoveryUnit::get(opCtx)->getSession(opCtx);
StatusWith<int64_t> result =
WiredTigerUtil::getStatisticsValueAs<int64_t>(session->getSession(),
"statistics:" + getURI(),
@@ -934,9 +937,9 @@ RecordData WiredTigerRecordStore::_getData(const WiredTigerCursor& cursor) const
return RecordData(data, value.size);
}
-RecordData WiredTigerRecordStore::dataFor(OperationContext* txn, const RecordId& id) const {
+RecordData WiredTigerRecordStore::dataFor(OperationContext* opCtx, const RecordId& id) const {
// ownership passes to the shared_array created below
- WiredTigerCursor curwrap(_uri, _tableId, true, txn);
+ WiredTigerCursor curwrap(_uri, _tableId, true, opCtx);
WT_CURSOR* c = curwrap.get();
invariant(c);
c->set_key(c, _makeKey(id));
@@ -946,10 +949,10 @@ RecordData WiredTigerRecordStore::dataFor(OperationContext* txn, const RecordId&
return _getData(curwrap);
}
-bool WiredTigerRecordStore::findRecord(OperationContext* txn,
+bool WiredTigerRecordStore::findRecord(OperationContext* opCtx,
const RecordId& id,
RecordData* out) const {
- WiredTigerCursor curwrap(_uri, _tableId, true, txn);
+ WiredTigerCursor curwrap(_uri, _tableId, true, opCtx);
WT_CURSOR* c = curwrap.get();
invariant(c);
c->set_key(c, _makeKey(id));
@@ -962,12 +965,12 @@ bool WiredTigerRecordStore::findRecord(OperationContext* txn,
return true;
}
-void WiredTigerRecordStore::deleteRecord(OperationContext* txn, const RecordId& id) {
+void WiredTigerRecordStore::deleteRecord(OperationContext* opCtx, const RecordId& id) {
// Deletes should never occur on a capped collection because truncation uses
// WT_SESSION::truncate().
invariant(!isCapped());
- WiredTigerCursor cursor(_uri, _tableId, true, txn);
+ WiredTigerCursor cursor(_uri, _tableId, true, opCtx);
cursor.assertInActiveTxn();
WT_CURSOR* c = cursor.get();
c->set_key(c, _makeKey(id));
@@ -983,8 +986,8 @@ void WiredTigerRecordStore::deleteRecord(OperationContext* txn, const RecordId&
ret = WT_OP_CHECK(c->remove(c));
invariantWTOK(ret);
- _changeNumRecords(txn, -1);
- _increaseDataSize(txn, -old_length);
+ _changeNumRecords(opCtx, -1);
+ _increaseDataSize(opCtx, -old_length);
}
bool WiredTigerRecordStore::cappedAndNeedDelete() const {
@@ -1000,7 +1003,7 @@ bool WiredTigerRecordStore::cappedAndNeedDelete() const {
return false;
}
-int64_t WiredTigerRecordStore::cappedDeleteAsNeeded(OperationContext* txn,
+int64_t WiredTigerRecordStore::cappedDeleteAsNeeded(OperationContext* opCtx,
const RecordId& justInserted) {
invariant(!_oplogStones);
@@ -1040,20 +1043,20 @@ int64_t WiredTigerRecordStore::cappedDeleteAsNeeded(OperationContext* txn,
}
}
- return cappedDeleteAsNeeded_inlock(txn, justInserted);
+ return cappedDeleteAsNeeded_inlock(opCtx, justInserted);
}
-int64_t WiredTigerRecordStore::cappedDeleteAsNeeded_inlock(OperationContext* txn,
+int64_t WiredTigerRecordStore::cappedDeleteAsNeeded_inlock(OperationContext* opCtx,
const RecordId& justInserted) {
// we do this in a side transaction in case it aborts
WiredTigerRecoveryUnit* realRecoveryUnit =
- checked_cast<WiredTigerRecoveryUnit*>(txn->releaseRecoveryUnit());
+ checked_cast<WiredTigerRecoveryUnit*>(opCtx->releaseRecoveryUnit());
invariant(realRecoveryUnit);
WiredTigerSessionCache* sc = realRecoveryUnit->getSessionCache();
OperationContext::RecoveryUnitState const realRUstate =
- txn->setRecoveryUnit(new WiredTigerRecoveryUnit(sc), OperationContext::kNotInUnitOfWork);
+ opCtx->setRecoveryUnit(new WiredTigerRecoveryUnit(sc), OperationContext::kNotInUnitOfWork);
- WT_SESSION* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn)->getSession();
+ WT_SESSION* session = WiredTigerRecoveryUnit::get(opCtx)->getSession(opCtx)->getSession();
int64_t dataSize = _dataSize.load();
int64_t numRecords = _numRecords.load();
@@ -1065,9 +1068,9 @@ int64_t WiredTigerRecordStore::cappedDeleteAsNeeded_inlock(OperationContext* txn
docsOverCap = numRecords - _cappedMaxDocs;
try {
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
- WiredTigerCursor curwrap(_uri, _tableId, true, txn);
+ WiredTigerCursor curwrap(_uri, _tableId, true, opCtx);
WT_CURSOR* truncateEnd = curwrap.get();
RecordId newestIdToDelete;
int ret = 0;
@@ -1109,7 +1112,7 @@ int64_t WiredTigerRecordStore::cappedDeleteAsNeeded_inlock(OperationContext* txn
stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex);
if (_cappedCallback) {
uassertStatusOK(_cappedCallback->aboutToDeleteCapped(
- txn,
+ opCtx,
newestIdToDelete,
RecordData(static_cast<const char*>(old_value.data), old_value.size)));
}
@@ -1136,7 +1139,7 @@ int64_t WiredTigerRecordStore::cappedDeleteAsNeeded_inlock(OperationContext* txn
}
invariantWTOK(truncateEnd->prev(truncateEnd)); // put the cursor back where it was
- WiredTigerCursor startWrap(_uri, _tableId, true, txn);
+ WiredTigerCursor startWrap(_uri, _tableId, true, opCtx);
WT_CURSOR* truncateStart = startWrap.get();
// If we know where the start point is, set it for the truncate
@@ -1153,35 +1156,35 @@ int64_t WiredTigerRecordStore::cappedDeleteAsNeeded_inlock(OperationContext* txn
docsRemoved = 0;
} else {
invariantWTOK(ret);
- _changeNumRecords(txn, -docsRemoved);
- _increaseDataSize(txn, -sizeSaved);
+ _changeNumRecords(opCtx, -docsRemoved);
+ _increaseDataSize(opCtx, -sizeSaved);
wuow.commit();
// Save the key for the next round
_cappedFirstRecord = firstRemainingId;
}
}
} catch (const WriteConflictException& wce) {
- delete txn->releaseRecoveryUnit();
- txn->setRecoveryUnit(realRecoveryUnit, realRUstate);
+ delete opCtx->releaseRecoveryUnit();
+ opCtx->setRecoveryUnit(realRecoveryUnit, realRUstate);
log() << "got conflict truncating capped, ignoring";
return 0;
} catch (...) {
- delete txn->releaseRecoveryUnit();
- txn->setRecoveryUnit(realRecoveryUnit, realRUstate);
+ delete opCtx->releaseRecoveryUnit();
+ opCtx->setRecoveryUnit(realRecoveryUnit, realRUstate);
throw;
}
- delete txn->releaseRecoveryUnit();
- txn->setRecoveryUnit(realRecoveryUnit, realRUstate);
+ delete opCtx->releaseRecoveryUnit();
+ opCtx->setRecoveryUnit(realRecoveryUnit, realRUstate);
return docsRemoved;
}
-bool WiredTigerRecordStore::yieldAndAwaitOplogDeletionRequest(OperationContext* txn) {
+bool WiredTigerRecordStore::yieldAndAwaitOplogDeletionRequest(OperationContext* opCtx) {
// Create another reference to the oplog stones while holding a lock on the collection to
// prevent it from being destructed.
std::shared_ptr<OplogStones> oplogStones = _oplogStones;
- Locker* locker = txn->lockState();
+ Locker* locker = opCtx->lockState();
Locker::LockSnapshot snapshot;
// Release any locks before waiting on the condition variable. It is illegal to access any
@@ -1191,7 +1194,7 @@ bool WiredTigerRecordStore::yieldAndAwaitOplogDeletionRequest(OperationContext*
// The top-level locks were freed, so also release any potential low-level (storage engine)
// locks that might be held.
- txn->recoveryUnit()->abandonSnapshot();
+ opCtx->recoveryUnit()->abandonSnapshot();
// Wait for an oplog deletion request, or for this record store to have been destroyed.
oplogStones->awaitHasExcessStonesOrDead();
@@ -1202,7 +1205,7 @@ bool WiredTigerRecordStore::yieldAndAwaitOplogDeletionRequest(OperationContext*
return !oplogStones->isDead();
}
-void WiredTigerRecordStore::reclaimOplog(OperationContext* txn) {
+void WiredTigerRecordStore::reclaimOplog(OperationContext* opCtx) {
while (auto stone = _oplogStones->peekOldestStoneIfNeeded()) {
invariant(stone->lastRecord.isNormal());
@@ -1210,23 +1213,23 @@ void WiredTigerRecordStore::reclaimOplog(OperationContext* txn) {
<< stone->lastRecord << " to remove approximately " << stone->records
<< " records totaling to " << stone->bytes << " bytes";
- WiredTigerRecoveryUnit* ru = WiredTigerRecoveryUnit::get(txn);
- WT_SESSION* session = ru->getSession(txn)->getSession();
+ WiredTigerRecoveryUnit* ru = WiredTigerRecoveryUnit::get(opCtx);
+ WT_SESSION* session = ru->getSession(opCtx)->getSession();
try {
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
- WiredTigerCursor startwrap(_uri, _tableId, true, txn);
+ WiredTigerCursor startwrap(_uri, _tableId, true, opCtx);
WT_CURSOR* start = startwrap.get();
start->set_key(start, _makeKey(_oplogStones->firstRecord));
- WiredTigerCursor endwrap(_uri, _tableId, true, txn);
+ WiredTigerCursor endwrap(_uri, _tableId, true, opCtx);
WT_CURSOR* end = endwrap.get();
end->set_key(end, _makeKey(stone->lastRecord));
invariantWTOK(session->truncate(session, nullptr, start, end, nullptr));
- _changeNumRecords(txn, -stone->records);
- _increaseDataSize(txn, -stone->bytes);
+ _changeNumRecords(opCtx, -stone->records);
+ _increaseDataSize(opCtx, -stone->bytes);
wuow.commit();
@@ -1244,13 +1247,13 @@ void WiredTigerRecordStore::reclaimOplog(OperationContext* txn) {
<< " records totaling to " << _dataSize.load() << " bytes";
}
-Status WiredTigerRecordStore::insertRecords(OperationContext* txn,
+Status WiredTigerRecordStore::insertRecords(OperationContext* opCtx,
std::vector<Record>* records,
bool enforceQuota) {
- return _insertRecords(txn, records->data(), records->size());
+ return _insertRecords(opCtx, records->data(), records->size());
}
-Status WiredTigerRecordStore::_insertRecords(OperationContext* txn,
+Status WiredTigerRecordStore::_insertRecords(OperationContext* opCtx,
Record* records,
size_t nRecords) {
// We are kind of cheating on capped collections since we write all of them at once ....
@@ -1263,7 +1266,7 @@ Status WiredTigerRecordStore::_insertRecords(OperationContext* txn,
if (_isCapped && totalLength > _cappedMaxSize)
return Status(ErrorCodes::BadValue, "object to insert exceeds cappedMaxSize");
- WiredTigerCursor curwrap(_uri, _tableId, true, txn);
+ WiredTigerCursor curwrap(_uri, _tableId, true, opCtx);
curwrap.assertInActiveTxn();
WT_CURSOR* c = curwrap.get();
invariant(c);
@@ -1281,7 +1284,7 @@ Status WiredTigerRecordStore::_insertRecords(OperationContext* txn,
} else if (_isCapped) {
stdx::lock_guard<stdx::mutex> lk(_uncommittedRecordIdsMutex);
record.id = _nextId();
- _addUncommittedRecordId_inlock(txn, record.id);
+ _addUncommittedRecordId_inlock(opCtx, record.id);
} else {
record.id = _nextId();
}
@@ -1305,24 +1308,25 @@ Status WiredTigerRecordStore::_insertRecords(OperationContext* txn,
return wtRCToStatus(ret, "WiredTigerRecordStore::insertRecord");
}
- _changeNumRecords(txn, nRecords);
- _increaseDataSize(txn, totalLength);
+ _changeNumRecords(opCtx, nRecords);
+ _increaseDataSize(opCtx, totalLength);
if (_oplogStones) {
- _oplogStones->updateCurrentStoneAfterInsertOnCommit(txn, totalLength, highestId, nRecords);
+ _oplogStones->updateCurrentStoneAfterInsertOnCommit(
+ opCtx, totalLength, highestId, nRecords);
} else {
- cappedDeleteAsNeeded(txn, highestId);
+ cappedDeleteAsNeeded(opCtx, highestId);
}
return Status::OK();
}
-StatusWith<RecordId> WiredTigerRecordStore::insertRecord(OperationContext* txn,
+StatusWith<RecordId> WiredTigerRecordStore::insertRecord(OperationContext* opCtx,
const char* data,
int len,
bool enforceQuota) {
Record record = {RecordId(), RecordData(data, len)};
- Status status = _insertRecords(txn, &record, 1);
+ Status status = _insertRecords(opCtx, &record, 1);
if (!status.isOK())
return StatusWith<RecordId>(status);
return StatusWith<RecordId>(record.id);
@@ -1362,7 +1366,7 @@ RecordId WiredTigerRecordStore::lowestCappedHiddenRecord() const {
return _uncommittedRecordIds.empty() ? RecordId() : _uncommittedRecordIds.front();
}
-Status WiredTigerRecordStore::insertRecordsWithDocWriter(OperationContext* txn,
+Status WiredTigerRecordStore::insertRecordsWithDocWriter(OperationContext* opCtx,
const DocWriter* const* docs,
size_t nDocs,
RecordId* idsOut) {
@@ -1388,7 +1392,7 @@ Status WiredTigerRecordStore::insertRecordsWithDocWriter(OperationContext* txn,
}
invariant(pos == (buffer.get() + totalSize));
- Status s = _insertRecords(txn, records.get(), nDocs);
+ Status s = _insertRecords(opCtx, records.get(), nDocs);
if (!s.isOK())
return s;
@@ -1401,13 +1405,13 @@ Status WiredTigerRecordStore::insertRecordsWithDocWriter(OperationContext* txn,
return s;
}
-Status WiredTigerRecordStore::updateRecord(OperationContext* txn,
+Status WiredTigerRecordStore::updateRecord(OperationContext* opCtx,
const RecordId& id,
const char* data,
int len,
bool enforceQuota,
UpdateNotifier* notifier) {
- WiredTigerCursor curwrap(_uri, _tableId, true, txn);
+ WiredTigerCursor curwrap(_uri, _tableId, true, opCtx);
curwrap.assertInActiveTxn();
WT_CURSOR* c = curwrap.get();
invariant(c);
@@ -1431,9 +1435,9 @@ Status WiredTigerRecordStore::updateRecord(OperationContext* txn,
ret = WT_OP_CHECK(c->insert(c));
invariantWTOK(ret);
- _increaseDataSize(txn, len - old_length);
+ _increaseDataSize(opCtx, len - old_length);
if (!_oplogStones) {
- cappedDeleteAsNeeded(txn, id);
+ cappedDeleteAsNeeded(opCtx, id);
}
return Status::OK();
@@ -1444,7 +1448,7 @@ bool WiredTigerRecordStore::updateWithDamagesSupported() const {
}
StatusWith<RecordData> WiredTigerRecordStore::updateWithDamages(
- OperationContext* txn,
+ OperationContext* opCtx,
const RecordId& id,
const RecordData& oldRec,
const char* damageSource,
@@ -1461,41 +1465,42 @@ void WiredTigerRecordStore::_oplogSetStartHack(WiredTigerRecoveryUnit* wru) cons
}
}
-std::unique_ptr<SeekableRecordCursor> WiredTigerRecordStore::getCursor(OperationContext* txn,
+std::unique_ptr<SeekableRecordCursor> WiredTigerRecordStore::getCursor(OperationContext* opCtx,
bool forward) const {
if (_isOplog && forward) {
- WiredTigerRecoveryUnit* wru = WiredTigerRecoveryUnit::get(txn);
+ WiredTigerRecoveryUnit* wru = WiredTigerRecoveryUnit::get(opCtx);
// If we already have a snapshot we don't know what it can see, unless we know no one
// else could be writing (because we hold an exclusive lock).
- if (wru->inActiveTxn() && !txn->lockState()->isNoop() &&
- !txn->lockState()->isCollectionLockedForMode(_ns, MODE_X)) {
+ if (wru->inActiveTxn() && !opCtx->lockState()->isNoop() &&
+ !opCtx->lockState()->isCollectionLockedForMode(_ns, MODE_X)) {
throw WriteConflictException();
}
_oplogSetStartHack(wru);
}
- return stdx::make_unique<Cursor>(txn, *this, forward);
+ return stdx::make_unique<Cursor>(opCtx, *this, forward);
}
-std::unique_ptr<RecordCursor> WiredTigerRecordStore::getRandomCursor(OperationContext* txn) const {
+std::unique_ptr<RecordCursor> WiredTigerRecordStore::getRandomCursor(
+ OperationContext* opCtx) const {
const char* extraConfig = "";
- return getRandomCursorWithOptions(txn, extraConfig);
+ return getRandomCursorWithOptions(opCtx, extraConfig);
}
std::unique_ptr<RecordCursor> WiredTigerRecordStore::getRandomCursorWithOptions(
- OperationContext* txn, StringData extraConfig) const {
- return stdx::make_unique<RandomCursor>(txn, *this, extraConfig);
+ OperationContext* opCtx, StringData extraConfig) const {
+ return stdx::make_unique<RandomCursor>(opCtx, *this, extraConfig);
}
std::vector<std::unique_ptr<RecordCursor>> WiredTigerRecordStore::getManyCursors(
- OperationContext* txn) const {
+ OperationContext* opCtx) const {
std::vector<std::unique_ptr<RecordCursor>> cursors(1);
- cursors[0] = stdx::make_unique<Cursor>(txn, *this, /*forward=*/true);
+ cursors[0] = stdx::make_unique<Cursor>(opCtx, *this, /*forward=*/true);
return cursors;
}
-Status WiredTigerRecordStore::truncate(OperationContext* txn) {
- WiredTigerCursor startWrap(_uri, _tableId, true, txn);
+Status WiredTigerRecordStore::truncate(OperationContext* opCtx) {
+ WiredTigerCursor startWrap(_uri, _tableId, true, opCtx);
WT_CURSOR* start = startWrap.get();
int ret = WT_OP_CHECK(start->next(start));
// Empty collections don't have anything to truncate.
@@ -1504,23 +1509,23 @@ Status WiredTigerRecordStore::truncate(OperationContext* txn) {
}
invariantWTOK(ret);
- WT_SESSION* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn)->getSession();
+ WT_SESSION* session = WiredTigerRecoveryUnit::get(opCtx)->getSession(opCtx)->getSession();
invariantWTOK(WT_OP_CHECK(session->truncate(session, NULL, start, NULL, NULL)));
- _changeNumRecords(txn, -numRecords(txn));
- _increaseDataSize(txn, -dataSize(txn));
+ _changeNumRecords(opCtx, -numRecords(opCtx));
+ _increaseDataSize(opCtx, -dataSize(opCtx));
if (_oplogStones) {
- _oplogStones->clearStonesOnCommit(txn);
+ _oplogStones->clearStonesOnCommit(opCtx);
}
return Status::OK();
}
-Status WiredTigerRecordStore::compact(OperationContext* txn,
+Status WiredTigerRecordStore::compact(OperationContext* opCtx,
RecordStoreCompactAdaptor* adaptor,
const CompactOptions* options,
CompactStats* stats) {
- WiredTigerSessionCache* cache = WiredTigerRecoveryUnit::get(txn)->getSessionCache();
+ WiredTigerSessionCache* cache = WiredTigerRecoveryUnit::get(opCtx)->getSessionCache();
if (!cache->isEphemeral()) {
UniqueWiredTigerSession session = cache->getSession();
WT_SESSION* s = session->getSession();
@@ -1530,13 +1535,13 @@ Status WiredTigerRecordStore::compact(OperationContext* txn,
return Status::OK();
}
-Status WiredTigerRecordStore::validate(OperationContext* txn,
+Status WiredTigerRecordStore::validate(OperationContext* opCtx,
ValidateCmdLevel level,
ValidateAdaptor* adaptor,
ValidateResults* results,
BSONObjBuilder* output) {
if (!_isEphemeral) {
- int err = WiredTigerUtil::verifyTable(txn, _uri, &results->errors);
+ int err = WiredTigerUtil::verifyTable(opCtx, _uri, &results->errors);
if (err == EBUSY) {
const char* msg = "verify() returned EBUSY. Not treating as invalid.";
warning() << msg;
@@ -1558,12 +1563,12 @@ Status WiredTigerRecordStore::validate(OperationContext* txn,
long long nInvalid = 0;
results->valid = true;
- Cursor cursor(txn, *this, true);
+ Cursor cursor(opCtx, *this, true);
int interruptInterval = 4096;
while (auto record = cursor.next()) {
if (!(nrecords % interruptInterval))
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
++nrecords;
auto dataSize = record->data.size();
dataSizeTotal += dataSize;
@@ -1606,7 +1611,7 @@ Status WiredTigerRecordStore::validate(OperationContext* txn,
return Status::OK();
}
-void WiredTigerRecordStore::appendCustomStats(OperationContext* txn,
+void WiredTigerRecordStore::appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* result,
double scale) const {
result->appendBool("capped", _isCapped);
@@ -1616,12 +1621,12 @@ void WiredTigerRecordStore::appendCustomStats(OperationContext* txn,
result->appendIntOrLL("sleepCount", _cappedSleep.load());
result->appendIntOrLL("sleepMS", _cappedSleepMS.load());
}
- WiredTigerSession* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn);
+ WiredTigerSession* session = WiredTigerRecoveryUnit::get(opCtx)->getSession(opCtx);
WT_SESSION* s = session->getSession();
BSONObjBuilder bob(result->subobjStart(_engineName));
{
BSONObjBuilder metadata(bob.subobjStart("metadata"));
- Status status = WiredTigerUtil::getApplicationMetadata(txn, getURI(), &metadata);
+ Status status = WiredTigerUtil::getApplicationMetadata(opCtx, getURI(), &metadata);
if (!status.isOK()) {
metadata.append("error", "unable to retrieve metadata");
metadata.append("code", static_cast<int>(status.code()));
@@ -1630,8 +1635,8 @@ void WiredTigerRecordStore::appendCustomStats(OperationContext* txn,
}
std::string type, sourceURI;
- WiredTigerUtil::fetchTypeAndSourceURI(txn, _uri, &type, &sourceURI);
- StatusWith<std::string> metadataResult = WiredTigerUtil::getMetadata(txn, sourceURI);
+ WiredTigerUtil::fetchTypeAndSourceURI(opCtx, _uri, &type, &sourceURI);
+ StatusWith<std::string> metadataResult = WiredTigerUtil::getMetadata(opCtx, sourceURI);
StringData creationStringName("creationString");
if (!metadataResult.isOK()) {
BSONObjBuilder creationString(bob.subobjStart(creationStringName));
@@ -1653,7 +1658,7 @@ void WiredTigerRecordStore::appendCustomStats(OperationContext* txn,
}
}
-Status WiredTigerRecordStore::touch(OperationContext* txn, BSONObjBuilder* output) const {
+Status WiredTigerRecordStore::touch(OperationContext* opCtx, BSONObjBuilder* output) const {
if (_isEphemeral) {
// Everything is already in memory.
return Status::OK();
@@ -1661,13 +1666,14 @@ Status WiredTigerRecordStore::touch(OperationContext* txn, BSONObjBuilder* outpu
return Status(ErrorCodes::CommandNotSupported, "this storage engine does not support touch");
}
-Status WiredTigerRecordStore::oplogDiskLocRegister(OperationContext* txn, const Timestamp& opTime) {
+Status WiredTigerRecordStore::oplogDiskLocRegister(OperationContext* opCtx,
+ const Timestamp& opTime) {
StatusWith<RecordId> id = oploghack::keyForOptime(opTime);
if (!id.isOK())
return id.getStatus();
stdx::lock_guard<stdx::mutex> lk(_uncommittedRecordIdsMutex);
- _addUncommittedRecordId_inlock(txn, id.getValue());
+ _addUncommittedRecordId_inlock(opCtx, id.getValue());
return Status::OK();
}
@@ -1733,38 +1739,38 @@ void WiredTigerRecordStore::_oplogJournalThreadLoop(WiredTigerSessionCache* sess
std::terminate();
}
-void WiredTigerRecordStore::waitForAllEarlierOplogWritesToBeVisible(OperationContext* txn) const {
- invariant(txn->lockState()->isNoop() || !txn->lockState()->inAWriteUnitOfWork());
+void WiredTigerRecordStore::waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx) const {
+ invariant(opCtx->lockState()->isNoop() || !opCtx->lockState()->inAWriteUnitOfWork());
// This function must not start a WT transaction, otherwise we will get stuck in an infinite
// loop of WCE handling when the getCursor() is called.
stdx::unique_lock<stdx::mutex> lk(_uncommittedRecordIdsMutex);
const auto waitingFor = _oplog_highestSeen;
- txn->waitForConditionOrInterrupt(_opsBecameVisibleCV, lk, [&] {
+ opCtx->waitForConditionOrInterrupt(_opsBecameVisibleCV, lk, [&] {
return _uncommittedRecordIds.empty() || _uncommittedRecordIds.front() > waitingFor;
});
}
-void WiredTigerRecordStore::_addUncommittedRecordId_inlock(OperationContext* txn, RecordId id) {
+void WiredTigerRecordStore::_addUncommittedRecordId_inlock(OperationContext* opCtx, RecordId id) {
dassert(_uncommittedRecordIds.empty() || _uncommittedRecordIds.back() < id);
SortedRecordIds::iterator it = _uncommittedRecordIds.insert(_uncommittedRecordIds.end(), id);
invariant(it->isNormal());
- txn->recoveryUnit()->registerChange(new CappedInsertChange(this, it));
+ opCtx->recoveryUnit()->registerChange(new CappedInsertChange(this, it));
_oplog_highestSeen = id;
}
boost::optional<RecordId> WiredTigerRecordStore::oplogStartHack(
- OperationContext* txn, const RecordId& startingPosition) const {
+ OperationContext* opCtx, const RecordId& startingPosition) const {
if (!_useOplogHack)
return boost::none;
{
- WiredTigerRecoveryUnit* wru = WiredTigerRecoveryUnit::get(txn);
+ WiredTigerRecoveryUnit* wru = WiredTigerRecoveryUnit::get(opCtx);
_oplogSetStartHack(wru);
}
- WiredTigerCursor cursor(_uri, _tableId, true, txn);
+ WiredTigerCursor cursor(_uri, _tableId, true, opCtx);
WT_CURSOR* c = cursor.get();
int cmp;
@@ -1782,7 +1788,7 @@ boost::optional<RecordId> WiredTigerRecordStore::oplogStartHack(
return _fromKey(key);
}
-void WiredTigerRecordStore::updateStatsAfterRepair(OperationContext* txn,
+void WiredTigerRecordStore::updateStatsAfterRepair(OperationContext* opCtx,
long long numRecords,
long long dataSize) {
_numRecords.store(numRecords);
@@ -1800,8 +1806,8 @@ RecordId WiredTigerRecordStore::_nextId() {
return out;
}
-WiredTigerRecoveryUnit* WiredTigerRecordStore::_getRecoveryUnit(OperationContext* txn) {
- return checked_cast<WiredTigerRecoveryUnit*>(txn->recoveryUnit());
+WiredTigerRecoveryUnit* WiredTigerRecordStore::_getRecoveryUnit(OperationContext* opCtx) {
+ return checked_cast<WiredTigerRecoveryUnit*>(opCtx->recoveryUnit());
}
class WiredTigerRecordStore::NumRecordsChange : public RecoveryUnit::Change {
@@ -1817,8 +1823,8 @@ private:
int64_t _diff;
};
-void WiredTigerRecordStore::_changeNumRecords(OperationContext* txn, int64_t diff) {
- txn->recoveryUnit()->registerChange(new NumRecordsChange(this, diff));
+void WiredTigerRecordStore::_changeNumRecords(OperationContext* opCtx, int64_t diff) {
+ opCtx->recoveryUnit()->registerChange(new NumRecordsChange(this, diff));
if (_numRecords.fetchAndAdd(diff) < 0)
_numRecords.store(std::max(diff, int64_t(0)));
}
@@ -1836,9 +1842,9 @@ private:
int64_t _amount;
};
-void WiredTigerRecordStore::_increaseDataSize(OperationContext* txn, int64_t amount) {
- if (txn)
- txn->recoveryUnit()->registerChange(new DataSizeChange(this, amount));
+void WiredTigerRecordStore::_increaseDataSize(OperationContext* opCtx, int64_t amount) {
+ if (opCtx)
+ opCtx->recoveryUnit()->registerChange(new DataSizeChange(this, amount));
if (_dataSize.fetchAndAdd(amount) < 0)
_dataSize.store(std::max(amount, int64_t(0)));
@@ -1855,10 +1861,10 @@ RecordId WiredTigerRecordStore::_fromKey(int64_t key) {
return RecordId(key);
}
-void WiredTigerRecordStore::cappedTruncateAfter(OperationContext* txn,
+void WiredTigerRecordStore::cappedTruncateAfter(OperationContext* opCtx,
RecordId end,
bool inclusive) {
- Cursor cursor(txn, *this);
+ Cursor cursor(opCtx, *this);
auto record = cursor.seekExact(end);
massert(28807, str::stream() << "Failed to seek to the record located at " << end, record);
@@ -1869,7 +1875,7 @@ void WiredTigerRecordStore::cappedTruncateAfter(OperationContext* txn,
RecordId firstRemovedId;
if (inclusive) {
- Cursor reverseCursor(txn, *this, false);
+ Cursor reverseCursor(opCtx, *this, false);
invariant(reverseCursor.seekExact(end));
auto prev = reverseCursor.next();
lastKeptId = prev ? prev->id : RecordId();
@@ -1891,7 +1897,7 @@ void WiredTigerRecordStore::cappedTruncateAfter(OperationContext* txn,
do {
if (_cappedCallback) {
uassertStatusOK(
- _cappedCallback->aboutToDeleteCapped(txn, record->id, record->data));
+ _cappedCallback->aboutToDeleteCapped(opCtx, record->id, record->data));
}
recordsRemoved++;
bytesRemoved += record->data.size();
@@ -1900,17 +1906,17 @@ void WiredTigerRecordStore::cappedTruncateAfter(OperationContext* txn,
// Truncate the collection starting from the record located at 'firstRemovedId' to the end of
// the collection.
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
- WiredTigerCursor startwrap(_uri, _tableId, true, txn);
+ WiredTigerCursor startwrap(_uri, _tableId, true, opCtx);
WT_CURSOR* start = startwrap.get();
start->set_key(start, _makeKey(firstRemovedId));
- WT_SESSION* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn)->getSession();
+ WT_SESSION* session = WiredTigerRecoveryUnit::get(opCtx)->getSession(opCtx)->getSession();
invariantWTOK(session->truncate(session, nullptr, start, nullptr, nullptr));
- _changeNumRecords(txn, -recordsRemoved);
- _increaseDataSize(txn, -bytesRemoved);
+ _changeNumRecords(opCtx, -recordsRemoved);
+ _increaseDataSize(opCtx, -bytesRemoved);
wuow.commit();
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
index 50360324e75..131797e5fb5 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
@@ -86,7 +86,7 @@ public:
const CollectionOptions& options,
StringData extraStrings);
- WiredTigerRecordStore(OperationContext* txn,
+ WiredTigerRecordStore(OperationContext* opCtx,
StringData ns,
StringData uri,
std::string engineName,
@@ -102,39 +102,39 @@ public:
// name of the RecordStore implementation
virtual const char* name() const;
- virtual long long dataSize(OperationContext* txn) const;
+ virtual long long dataSize(OperationContext* opCtx) const;
- virtual long long numRecords(OperationContext* txn) const;
+ virtual long long numRecords(OperationContext* opCtx) const;
virtual bool isCapped() const;
- virtual int64_t storageSize(OperationContext* txn,
+ virtual int64_t storageSize(OperationContext* opCtx,
BSONObjBuilder* extraInfo = NULL,
int infoLevel = 0) const;
// CRUD related
- virtual RecordData dataFor(OperationContext* txn, const RecordId& id) const;
+ virtual RecordData dataFor(OperationContext* opCtx, const RecordId& id) const;
- virtual bool findRecord(OperationContext* txn, const RecordId& id, RecordData* out) const;
+ virtual bool findRecord(OperationContext* opCtx, const RecordId& id, RecordData* out) const;
- virtual void deleteRecord(OperationContext* txn, const RecordId& id);
+ virtual void deleteRecord(OperationContext* opCtx, const RecordId& id);
- virtual Status insertRecords(OperationContext* txn,
+ virtual Status insertRecords(OperationContext* opCtx,
std::vector<Record>* records,
bool enforceQuota);
- virtual StatusWith<RecordId> insertRecord(OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord(OperationContext* opCtx,
const char* data,
int len,
bool enforceQuota);
- virtual Status insertRecordsWithDocWriter(OperationContext* txn,
+ virtual Status insertRecordsWithDocWriter(OperationContext* opCtx,
const DocWriter* const* docs,
size_t nDocs,
RecordId* idsOut);
- virtual Status updateRecord(OperationContext* txn,
+ virtual Status updateRecord(OperationContext* opCtx,
const RecordId& oldLocation,
const char* data,
int len,
@@ -143,22 +143,22 @@ public:
virtual bool updateWithDamagesSupported() const;
- virtual StatusWith<RecordData> updateWithDamages(OperationContext* txn,
+ virtual StatusWith<RecordData> updateWithDamages(OperationContext* opCtx,
const RecordId& id,
const RecordData& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages);
- std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* txn,
+ std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* opCtx,
bool forward) const final;
- std::unique_ptr<RecordCursor> getRandomCursor(OperationContext* txn) const final;
+ std::unique_ptr<RecordCursor> getRandomCursor(OperationContext* opCtx) const final;
- std::unique_ptr<RecordCursor> getRandomCursorWithOptions(OperationContext* txn,
+ std::unique_ptr<RecordCursor> getRandomCursorWithOptions(OperationContext* opCtx,
StringData extraConfig) const;
- std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* txn) const final;
+ std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* opCtx) const final;
- virtual Status truncate(OperationContext* txn);
+ virtual Status truncate(OperationContext* opCtx);
virtual bool compactSupported() const {
return !_isEphemeral;
@@ -167,36 +167,36 @@ public:
return true;
}
- virtual Status compact(OperationContext* txn,
+ virtual Status compact(OperationContext* opCtx,
RecordStoreCompactAdaptor* adaptor,
const CompactOptions* options,
CompactStats* stats);
- virtual Status validate(OperationContext* txn,
+ virtual Status validate(OperationContext* opCtx,
ValidateCmdLevel level,
ValidateAdaptor* adaptor,
ValidateResults* results,
BSONObjBuilder* output);
- virtual void appendCustomStats(OperationContext* txn,
+ virtual void appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* result,
double scale) const;
- virtual Status touch(OperationContext* txn, BSONObjBuilder* output) const;
+ virtual Status touch(OperationContext* opCtx, BSONObjBuilder* output) const;
- virtual void cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive);
+ virtual void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive);
- virtual boost::optional<RecordId> oplogStartHack(OperationContext* txn,
+ virtual boost::optional<RecordId> oplogStartHack(OperationContext* opCtx,
const RecordId& startingPosition) const;
- virtual Status oplogDiskLocRegister(OperationContext* txn, const Timestamp& opTime);
+ virtual Status oplogDiskLocRegister(OperationContext* opCtx, const Timestamp& opTime);
- virtual void updateStatsAfterRepair(OperationContext* txn,
+ virtual void updateStatsAfterRepair(OperationContext* opCtx,
long long numRecords,
long long dataSize);
- void waitForAllEarlierOplogWritesToBeVisible(OperationContext* txn) const override;
+ void waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx) const override;
bool isOplog() const {
return _isOplog;
@@ -229,18 +229,18 @@ public:
bool inShutdown() const;
- void reclaimOplog(OperationContext* txn);
+ void reclaimOplog(OperationContext* opCtx);
- int64_t cappedDeleteAsNeeded(OperationContext* txn, const RecordId& justInserted);
+ int64_t cappedDeleteAsNeeded(OperationContext* opCtx, const RecordId& justInserted);
- int64_t cappedDeleteAsNeeded_inlock(OperationContext* txn, const RecordId& justInserted);
+ int64_t cappedDeleteAsNeeded_inlock(OperationContext* opCtx, const RecordId& justInserted);
boost::timed_mutex& cappedDeleterMutex() { // NOLINT
return _cappedDeleterMutex;
}
// Returns false if the oplog was dropped while waiting for a deletion request.
- bool yieldAndAwaitOplogDeletionRequest(OperationContext* txn);
+ bool yieldAndAwaitOplogDeletionRequest(OperationContext* opCtx);
class OplogStones;
@@ -257,21 +257,21 @@ private:
class NumRecordsChange;
class DataSizeChange;
- static WiredTigerRecoveryUnit* _getRecoveryUnit(OperationContext* txn);
+ static WiredTigerRecoveryUnit* _getRecoveryUnit(OperationContext* opCtx);
static int64_t _makeKey(const RecordId& id);
static RecordId _fromKey(int64_t k);
void _dealtWithCappedId(SortedRecordIds::iterator it, bool didCommit);
- void _addUncommittedRecordId_inlock(OperationContext* txn, RecordId id);
+ void _addUncommittedRecordId_inlock(OperationContext* opCtx, RecordId id);
- Status _insertRecords(OperationContext* txn, Record* records, size_t nRecords);
+ Status _insertRecords(OperationContext* opCtx, Record* records, size_t nRecords);
RecordId _nextId();
void _setId(RecordId id);
bool cappedAndNeedDelete() const;
- void _changeNumRecords(OperationContext* txn, int64_t diff);
- void _increaseDataSize(OperationContext* txn, int64_t amount);
+ void _changeNumRecords(OperationContext* opCtx, int64_t diff);
+ void _increaseDataSize(OperationContext* opCtx, int64_t amount);
RecordData _getData(const WiredTigerCursor& cursor) const;
void _oplogSetStartHack(WiredTigerRecoveryUnit* wru) const;
void _oplogJournalThreadLoop(WiredTigerSessionCache* sessionCache);
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
index 1d0606ee9de..ef85a84bb4f 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
@@ -77,34 +77,34 @@ public:
return false;
}
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
try {
- ScopedTransaction transaction(&txn, MODE_IX);
+ ScopedTransaction transaction(&opCtx, MODE_IX);
- AutoGetDb autoDb(&txn, _ns.db(), MODE_IX);
+ AutoGetDb autoDb(&opCtx, _ns.db(), MODE_IX);
Database* db = autoDb.getDb();
if (!db) {
LOG(2) << "no local database yet";
return false;
}
- Lock::CollectionLock collectionLock(txn.lockState(), _ns.ns(), MODE_IX);
+ Lock::CollectionLock collectionLock(opCtx.lockState(), _ns.ns(), MODE_IX);
Collection* collection = db->getCollection(_ns);
if (!collection) {
LOG(2) << "no collection " << _ns;
return false;
}
- OldClientContext ctx(&txn, _ns.ns(), false);
+ OldClientContext ctx(&opCtx, _ns.ns(), false);
WiredTigerRecordStore* rs =
checked_cast<WiredTigerRecordStore*>(collection->getRecordStore());
- if (!rs->yieldAndAwaitOplogDeletionRequest(&txn)) {
+ if (!rs->yieldAndAwaitOplogDeletionRequest(&opCtx)) {
return false; // Oplog went away.
}
- rs->reclaimOplog(&txn);
+ rs->reclaimOplog(&opCtx);
} catch (const std::exception& e) {
severe() << "error in WiredTigerRecordStoreThread: " << e.what();
fassertFailedNoTrace(!"error in WiredTigerRecordStoreThread");
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h
index daa8998af2a..f42ade90db8 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h
@@ -51,7 +51,7 @@ public:
RecordId lastRecord; // RecordId of the last record in a chunk of the oplog.
};
- OplogStones(OperationContext* txn, WiredTigerRecordStore* rs);
+ OplogStones(OperationContext* opCtx, WiredTigerRecordStore* rs);
bool isDead();
@@ -69,12 +69,12 @@ public:
void createNewStoneIfNeeded(RecordId lastRecord);
- void updateCurrentStoneAfterInsertOnCommit(OperationContext* txn,
+ void updateCurrentStoneAfterInsertOnCommit(OperationContext* opCtx,
int64_t bytesInserted,
RecordId highestInserted,
int64_t countInserted);
- void clearStonesOnCommit(OperationContext* txn);
+ void clearStonesOnCommit(OperationContext* opCtx);
// Updates the metadata about the oplog stones after a rollback occurs.
void updateStonesAfterCappedTruncateAfter(int64_t recordsRemoved,
@@ -110,9 +110,9 @@ private:
class InsertChange;
class TruncateChange;
- void _calculateStones(OperationContext* txn);
- void _calculateStonesByScanning(OperationContext* txn);
- void _calculateStonesBySampling(OperationContext* txn,
+ void _calculateStones(OperationContext* opCtx);
+ void _calculateStonesByScanning(OperationContext* opCtx);
+ void _calculateStonesBySampling(OperationContext* opCtx,
int64_t estRecordsPerStone,
int64_t estBytesPerStone);
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
index f8e8604d31b..e47072cbe58 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
@@ -96,7 +96,7 @@ public:
}
std::unique_ptr<RecordStore> newNonCappedRecordStore(const std::string& ns) {
WiredTigerRecoveryUnit* ru = new WiredTigerRecoveryUnit(_sessionCache);
- OperationContextNoop txn(ru);
+ OperationContextNoop opCtx(ru);
string uri = "table:" + ns;
StatusWith<std::string> result = WiredTigerRecordStore::generateCreateString(
@@ -105,14 +105,14 @@ public:
std::string config = result.getValue();
{
- WriteUnitOfWork uow(&txn);
- WT_SESSION* s = ru->getSession(&txn)->getSession();
+ WriteUnitOfWork uow(&opCtx);
+ WT_SESSION* s = ru->getSession(&opCtx)->getSession();
invariantWTOK(s->create(s, uri.c_str(), config.c_str()));
uow.commit();
}
return stdx::make_unique<WiredTigerRecordStore>(
- &txn, ns, uri, kWiredTigerEngineName, false, false);
+ &opCtx, ns, uri, kWiredTigerEngineName, false, false);
}
std::unique_ptr<RecordStore> newCappedRecordStore(int64_t cappedSizeBytes,
@@ -124,7 +124,7 @@ public:
int64_t cappedMaxSize,
int64_t cappedMaxDocs) {
WiredTigerRecoveryUnit* ru = new WiredTigerRecoveryUnit(_sessionCache);
- OperationContextNoop txn(ru);
+ OperationContextNoop opCtx(ru);
string uri = "table:a.b";
CollectionOptions options;
@@ -136,14 +136,14 @@ public:
std::string config = result.getValue();
{
- WriteUnitOfWork uow(&txn);
- WT_SESSION* s = ru->getSession(&txn)->getSession();
+ WriteUnitOfWork uow(&opCtx);
+ WT_SESSION* s = ru->getSession(&opCtx)->getSession();
invariantWTOK(s->create(s, uri.c_str(), config.c_str()));
uow.commit();
}
return stdx::make_unique<WiredTigerRecordStore>(
- &txn, ns, uri, kWiredTigerEngineName, true, false, cappedMaxSize, cappedMaxDocs);
+ &opCtx, ns, uri, kWiredTigerEngineName, true, false, cappedMaxSize, cappedMaxDocs);
}
std::unique_ptr<RecoveryUnit> newRecoveryUnit() final {
@@ -782,13 +782,15 @@ TEST(WiredTigerRecordStoreTest, CappedCursorRollover) {
ASSERT(!cursor->next());
}
-RecordId _oplogOrderInsertOplog(OperationContext* txn, const unique_ptr<RecordStore>& rs, int inc) {
+RecordId _oplogOrderInsertOplog(OperationContext* opCtx,
+ const unique_ptr<RecordStore>& rs,
+ int inc) {
Timestamp opTime = Timestamp(5, inc);
WiredTigerRecordStore* wrs = checked_cast<WiredTigerRecordStore*>(rs.get());
- Status status = wrs->oplogDiskLocRegister(txn, opTime);
+ Status status = wrs->oplogDiskLocRegister(opCtx, opTime);
ASSERT_OK(status);
BSONObj obj = BSON("ts" << opTime);
- StatusWith<RecordId> res = rs->insertRecord(txn, obj.objdata(), obj.objsize(), false);
+ StatusWith<RecordId> res = rs->insertRecord(opCtx, obj.objdata(), obj.objsize(), false);
ASSERT_OK(res.getStatus());
return res.getValue();
}
@@ -879,8 +881,8 @@ TEST(WiredTigerRecordStoreTest, OplogOrder) {
// the visibility rules aren't violated. See SERVER-21645
{
auto client2 = harnessHelper->serviceContext()->makeClient("c2");
- auto txn = harnessHelper->newOperationContext(client2.get());
- rs->cappedTruncateAfter(txn.get(), id1, /*inclusive*/ false);
+ auto opCtx = harnessHelper->newOperationContext(client2.get());
+ rs->cappedTruncateAfter(opCtx.get(), id1, /*inclusive*/ false);
}
{
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
index 483e0aa4de5..69f81c96e4b 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
@@ -209,7 +209,7 @@ void WiredTigerRecoveryUnit::_txnClose(bool commit) {
}
SnapshotId WiredTigerRecoveryUnit::getSnapshotId() const {
- // TODO: use actual wiredtiger txn id
+ // TODO: use actual wiredtiger opCtx id
return SnapshotId(_mySnapshotId);
}
@@ -257,10 +257,10 @@ void WiredTigerRecoveryUnit::_txnOpen(OperationContext* opCtx) {
WiredTigerCursor::WiredTigerCursor(const std::string& uri,
uint64_t tableId,
bool forRecordStore,
- OperationContext* txn) {
+ OperationContext* opCtx) {
_tableID = tableId;
- _ru = WiredTigerRecoveryUnit::get(txn);
- _session = _ru->getSession(txn);
+ _ru = WiredTigerRecoveryUnit::get(opCtx);
+ _session = _ru->getSession(opCtx);
_cursor = _session->getCursor(uri, tableId, forRecordStore);
if (!_cursor) {
error() << "no cursor for uri: " << uri;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h
index 48e7a31f200..5822630b004 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h
@@ -82,7 +82,7 @@ public:
WiredTigerSession* getSession(OperationContext* opCtx);
/**
- * Returns a session without starting a new WT txn on the session. Will not close any already
+ * Returns a session without starting a new WT opCtx on the session. Will not close any already
* running session.
*/
@@ -101,8 +101,8 @@ public:
return _oplogReadTill;
}
- static WiredTigerRecoveryUnit* get(OperationContext* txn) {
- return checked_cast<WiredTigerRecoveryUnit*>(txn->recoveryUnit());
+ static WiredTigerRecoveryUnit* get(OperationContext* opCtx) {
+ return checked_cast<WiredTigerRecoveryUnit*>(opCtx->recoveryUnit());
}
static void appendGlobalStats(BSONObjBuilder& b);
@@ -146,7 +146,7 @@ public:
WiredTigerCursor(const std::string& uri,
uint64_t tableID,
bool forRecordStore,
- OperationContext* txn);
+ OperationContext* opCtx);
~WiredTigerCursor();
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.cpp
index 4967dfc2f86..0fc7cea7be7 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.cpp
@@ -54,12 +54,12 @@ bool WiredTigerServerStatusSection::includeByDefault() const {
return true;
}
-BSONObj WiredTigerServerStatusSection::generateSection(OperationContext* txn,
+BSONObj WiredTigerServerStatusSection::generateSection(OperationContext* opCtx,
const BSONElement& configElement) const {
// The session does not open a transaction here as one is not needed and opening one would
// mean that execution could become blocked when a new transaction cannot be allocated
// immediately.
- WiredTigerSession* session = WiredTigerRecoveryUnit::get(txn)->getSessionNoTxn(txn);
+ WiredTigerSession* session = WiredTigerRecoveryUnit::get(opCtx)->getSessionNoTxn(opCtx);
invariant(session);
WT_SESSION* s = session->getSession();
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.h b/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.h
index 5e7c3b3e8a1..d724abee7a3 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.h
@@ -43,7 +43,8 @@ class WiredTigerServerStatusSection : public ServerStatusSection {
public:
WiredTigerServerStatusSection(WiredTigerKVEngine* engine);
virtual bool includeByDefault() const;
- virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const;
+ virtual BSONObj generateSection(OperationContext* opCtx,
+ const BSONElement& configElement) const;
private:
WiredTigerKVEngine* _engine;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp
index da0f618f1bd..ea42dac59d0 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp
@@ -41,13 +41,14 @@
namespace mongo {
-Status WiredTigerSnapshotManager::prepareForCreateSnapshot(OperationContext* txn) {
- WiredTigerRecoveryUnit::get(txn)->prepareForCreateSnapshot(txn);
+Status WiredTigerSnapshotManager::prepareForCreateSnapshot(OperationContext* opCtx) {
+ WiredTigerRecoveryUnit::get(opCtx)->prepareForCreateSnapshot(opCtx);
return Status::OK();
}
-Status WiredTigerSnapshotManager::createSnapshot(OperationContext* txn, const SnapshotName& name) {
- auto session = WiredTigerRecoveryUnit::get(txn)->getSession(txn)->getSession();
+Status WiredTigerSnapshotManager::createSnapshot(OperationContext* opCtx,
+ const SnapshotName& name) {
+ auto session = WiredTigerRecoveryUnit::get(opCtx)->getSession(opCtx)->getSession();
const std::string config = str::stream() << "name=" << name.asU64();
return wtRCToStatus(session->snapshot(session, config.c_str()));
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h
index 29763557fe3..d885df0c863 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h
@@ -51,7 +51,7 @@ public:
shutdown();
}
- Status prepareForCreateSnapshot(OperationContext* txn) final;
+ Status prepareForCreateSnapshot(OperationContext* opCtx) final;
Status createSnapshot(OperationContext* ru, const SnapshotName& name) final;
void setCommittedSnapshot(const SnapshotName& name) final;
void cleanupUnneededSnapshots() final;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
index 0f784c367ab..d92e3e66875 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
@@ -419,18 +419,18 @@ int WiredTigerUtil::ErrorAccumulator::onError(WT_EVENT_HANDLER* handler,
}
}
-int WiredTigerUtil::verifyTable(OperationContext* txn,
+int WiredTigerUtil::verifyTable(OperationContext* opCtx,
const std::string& uri,
std::vector<std::string>* errors) {
ErrorAccumulator eventHandler(errors);
// Try to close as much as possible to avoid EBUSY errors.
- WiredTigerRecoveryUnit::get(txn)->getSession(txn)->closeAllCursors();
- WiredTigerSessionCache* sessionCache = WiredTigerRecoveryUnit::get(txn)->getSessionCache();
+ WiredTigerRecoveryUnit::get(opCtx)->getSession(opCtx)->closeAllCursors();
+ WiredTigerSessionCache* sessionCache = WiredTigerRecoveryUnit::get(opCtx)->getSessionCache();
sessionCache->closeAllCursors();
// Open a new session with custom error handlers.
- WT_CONNECTION* conn = WiredTigerRecoveryUnit::get(txn)->getSessionCache()->conn();
+ WT_CONNECTION* conn = WiredTigerRecoveryUnit::get(opCtx)->getSessionCache()->conn();
WT_SESSION* session;
invariantWTOK(conn->open_session(conn, &eventHandler, NULL, &session));
ON_BLOCK_EXIT(session->close, session, "");
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util.h b/src/mongo/db/storage/wiredtiger/wiredtiger_util.h
index 33673f6e652..248ac7b8450 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_util.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util.h
@@ -214,7 +214,7 @@ public:
*
* If errors is non-NULL, all error messages will be appended to the array.
*/
- static int verifyTable(OperationContext* txn,
+ static int verifyTable(OperationContext* opCtx,
const std::string& uri,
std::vector<std::string>* errors = NULL);
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index 250663a98ad..c5ce7c49101 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -113,8 +113,8 @@ public:
private:
void doTTLPass() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
// If part of replSet but not in a readable state (e.g. during initial sync), skip.
if (repl::getGlobalReplicationCoordinator()->getReplicationMode() ==
@@ -130,9 +130,9 @@ private:
// Get all TTL indexes from every collection.
for (const std::string& collectionNS : ttlCollections) {
- ScopedTransaction st(&txn, MODE_IS);
+ ScopedTransaction st(&opCtx, MODE_IS);
NamespaceString collectionNSS(collectionNS);
- AutoGetCollection autoGetCollection(&txn, collectionNSS, MODE_IS);
+ AutoGetCollection autoGetCollection(&opCtx, collectionNSS, MODE_IS);
Collection* coll = autoGetCollection.getCollection();
if (!coll) {
// Skip since collection has been dropped.
@@ -141,9 +141,9 @@ private:
CollectionCatalogEntry* collEntry = coll->getCatalogEntry();
std::vector<std::string> indexNames;
- collEntry->getAllIndexes(&txn, &indexNames);
+ collEntry->getAllIndexes(&opCtx, &indexNames);
for (const std::string& name : indexNames) {
- BSONObj spec = collEntry->getIndexSpec(&txn, name);
+ BSONObj spec = collEntry->getIndexSpec(&opCtx, name);
if (spec.hasField(secondsExpireField)) {
ttlIndexes.push_back(spec.getOwned());
}
@@ -152,7 +152,7 @@ private:
for (const BSONObj& idx : ttlIndexes) {
try {
- doTTLForIndex(&txn, idx);
+ doTTLForIndex(&opCtx, idx);
} catch (const DBException& dbex) {
error() << "Error processing ttl index: " << idx << " -- " << dbex.toString();
// Continue on to the next index.
@@ -165,7 +165,7 @@ private:
* Remove documents from the collection using the specified TTL index after a sufficient amount
* of time has passed according to its expiry specification.
*/
- void doTTLForIndex(OperationContext* txn, BSONObj idx) {
+ void doTTLForIndex(OperationContext* opCtx, BSONObj idx) {
const NamespaceString collectionNSS(idx["ns"].String());
if (!userAllowedWriteNS(collectionNSS).isOK()) {
error() << "namespace '" << collectionNSS
@@ -182,18 +182,18 @@ private:
LOG(1) << "ns: " << collectionNSS << " key: " << key << " name: " << name;
- AutoGetCollection autoGetCollection(txn, collectionNSS, MODE_IX);
+ AutoGetCollection autoGetCollection(opCtx, collectionNSS, MODE_IX);
Collection* collection = autoGetCollection.getCollection();
if (!collection) {
// Collection was dropped.
return;
}
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(txn, collectionNSS)) {
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, collectionNSS)) {
return;
}
- IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByName(txn, name);
+ IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByName(opCtx, name);
if (!desc) {
LOG(1) << "index not found (index build in progress? index dropped?), skipping "
<< "ttl job for: " << idx;
@@ -237,7 +237,7 @@ private:
auto qr = stdx::make_unique<QueryRequest>(collectionNSS);
qr->setFilter(query);
auto canonicalQuery = CanonicalQuery::canonicalize(
- txn, std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx, std::move(qr), ExtensionsCallbackDisallowExtensions());
invariantOK(canonicalQuery.getStatus());
DeleteStageParams params;
@@ -245,7 +245,7 @@ private:
params.canonicalQuery = canonicalQuery.getValue().get();
std::unique_ptr<PlanExecutor> exec =
- InternalPlanner::deleteWithIndexScan(txn,
+ InternalPlanner::deleteWithIndexScan(opCtx,
collection,
params,
desc,
diff --git a/src/mongo/db/views/durable_view_catalog.cpp b/src/mongo/db/views/durable_view_catalog.cpp
index 1b597081d50..849ce37d4f4 100644
--- a/src/mongo/db/views/durable_view_catalog.cpp
+++ b/src/mongo/db/views/durable_view_catalog.cpp
@@ -50,12 +50,12 @@ namespace mongo {
// DurableViewCatalog
-void DurableViewCatalog::onExternalChange(OperationContext* txn, const NamespaceString& name) {
- dassert(txn->lockState()->isDbLockedForMode(name.db(), MODE_IX));
- Database* db = dbHolder().get(txn, name.db());
+void DurableViewCatalog::onExternalChange(OperationContext* opCtx, const NamespaceString& name) {
+ dassert(opCtx->lockState()->isDbLockedForMode(name.db(), MODE_IX));
+ Database* db = dbHolder().get(opCtx, name.db());
if (db) {
- txn->recoveryUnit()->onCommit([db]() { db->getViewCatalog()->invalidate(); });
+ opCtx->recoveryUnit()->onCommit([db]() { db->getViewCatalog()->invalidate(); });
}
}
@@ -65,15 +65,15 @@ const std::string& DurableViewCatalogImpl::getName() const {
return _db->name();
}
-Status DurableViewCatalogImpl::iterate(OperationContext* txn, Callback callback) {
- dassert(txn->lockState()->isDbLockedForMode(_db->name(), MODE_IS) ||
- txn->lockState()->isDbLockedForMode(_db->name(), MODE_IX));
+Status DurableViewCatalogImpl::iterate(OperationContext* opCtx, Callback callback) {
+ dassert(opCtx->lockState()->isDbLockedForMode(_db->name(), MODE_IS) ||
+ opCtx->lockState()->isDbLockedForMode(_db->name(), MODE_IX));
Collection* systemViews = _db->getCollection(_db->getSystemViewsName());
if (!systemViews)
return Status::OK();
- Lock::CollectionLock lk(txn->lockState(), _db->getSystemViewsName(), MODE_IS);
- auto cursor = systemViews->getCursor(txn);
+ Lock::CollectionLock lk(opCtx->lockState(), _db->getSystemViewsName(), MODE_IS);
+ auto cursor = systemViews->getCursor(opCtx);
while (auto record = cursor->next()) {
RecordData& data = record->data;
@@ -119,22 +119,22 @@ Status DurableViewCatalogImpl::iterate(OperationContext* txn, Callback callback)
return Status::OK();
}
-void DurableViewCatalogImpl::upsert(OperationContext* txn,
+void DurableViewCatalogImpl::upsert(OperationContext* opCtx,
const NamespaceString& name,
const BSONObj& view) {
- dassert(txn->lockState()->isDbLockedForMode(_db->name(), MODE_X));
+ dassert(opCtx->lockState()->isDbLockedForMode(_db->name(), MODE_X));
NamespaceString systemViewsNs(_db->getSystemViewsName());
- Collection* systemViews = _db->getOrCreateCollection(txn, systemViewsNs.ns());
+ Collection* systemViews = _db->getOrCreateCollection(opCtx, systemViewsNs.ns());
const bool requireIndex = false;
- RecordId id = Helpers::findOne(txn, systemViews, BSON("_id" << name.ns()), requireIndex);
+ RecordId id = Helpers::findOne(opCtx, systemViews, BSON("_id" << name.ns()), requireIndex);
const bool enforceQuota = true;
Snapshotted<BSONObj> oldView;
- if (!id.isNormal() || !systemViews->findDoc(txn, id, &oldView)) {
+ if (!id.isNormal() || !systemViews->findDoc(opCtx, id, &oldView)) {
LOG(2) << "insert view " << view << " into " << _db->getSystemViewsName();
uassertStatusOK(
- systemViews->insertDocument(txn, view, &CurOp::get(txn)->debug(), enforceQuota));
+ systemViews->insertDocument(opCtx, view, &CurOp::get(opCtx)->debug(), enforceQuota));
} else {
OplogUpdateEntryArgs args;
args.ns = systemViewsNs.ns();
@@ -143,29 +143,29 @@ void DurableViewCatalogImpl::upsert(OperationContext* txn,
args.fromMigrate = false;
const bool assumeIndexesAreAffected = true;
- auto res = systemViews->updateDocument(txn,
+ auto res = systemViews->updateDocument(opCtx,
id,
oldView,
view,
enforceQuota,
assumeIndexesAreAffected,
- &CurOp::get(txn)->debug(),
+ &CurOp::get(opCtx)->debug(),
&args);
uassertStatusOK(res);
}
}
-void DurableViewCatalogImpl::remove(OperationContext* txn, const NamespaceString& name) {
- dassert(txn->lockState()->isDbLockedForMode(_db->name(), MODE_X));
+void DurableViewCatalogImpl::remove(OperationContext* opCtx, const NamespaceString& name) {
+ dassert(opCtx->lockState()->isDbLockedForMode(_db->name(), MODE_X));
Collection* systemViews = _db->getCollection(_db->getSystemViewsName());
if (!systemViews)
return;
const bool requireIndex = false;
- RecordId id = Helpers::findOne(txn, systemViews, BSON("_id" << name.ns()), requireIndex);
+ RecordId id = Helpers::findOne(opCtx, systemViews, BSON("_id" << name.ns()), requireIndex);
if (!id.isNormal())
return;
LOG(2) << "remove view " << name << " from " << _db->getSystemViewsName();
- systemViews->deleteDocument(txn, id, &CurOp::get(txn)->debug());
+ systemViews->deleteDocument(opCtx, id, &CurOp::get(opCtx)->debug());
}
} // namespace mongo
diff --git a/src/mongo/db/views/durable_view_catalog.h b/src/mongo/db/views/durable_view_catalog.h
index 69386768963..3ffec8a152a 100644
--- a/src/mongo/db/views/durable_view_catalog.h
+++ b/src/mongo/db/views/durable_view_catalog.h
@@ -56,14 +56,14 @@ public:
* Thread-safe method to mark a catalog name was changed. This will cause the in-memory
* view catalog to be marked invalid
*/
- static void onExternalChange(OperationContext* txn, const NamespaceString& name);
+ static void onExternalChange(OperationContext* opCtx, const NamespaceString& name);
using Callback = stdx::function<Status(const BSONObj& view)>;
- virtual Status iterate(OperationContext* txn, Callback callback) = 0;
- virtual void upsert(OperationContext* txn,
+ virtual Status iterate(OperationContext* opCtx, Callback callback) = 0;
+ virtual void upsert(OperationContext* opCtx,
const NamespaceString& name,
const BSONObj& view) = 0;
- virtual void remove(OperationContext* txn, const NamespaceString& name) = 0;
+ virtual void remove(OperationContext* opCtx, const NamespaceString& name) = 0;
virtual const std::string& getName() const = 0;
};
@@ -75,9 +75,9 @@ class DurableViewCatalogImpl final : public DurableViewCatalog {
public:
explicit DurableViewCatalogImpl(Database* db) : _db(db) {}
- Status iterate(OperationContext* txn, Callback callback);
- void upsert(OperationContext* txn, const NamespaceString& name, const BSONObj& view);
- void remove(OperationContext* txn, const NamespaceString& name);
+ Status iterate(OperationContext* opCtx, Callback callback);
+ void upsert(OperationContext* opCtx, const NamespaceString& name, const BSONObj& view);
+ void remove(OperationContext* opCtx, const NamespaceString& name);
const std::string& getName() const;
private:
diff --git a/src/mongo/db/views/view_catalog.cpp b/src/mongo/db/views/view_catalog.cpp
index d8e0e842824..a3653c794d0 100644
--- a/src/mongo/db/views/view_catalog.cpp
+++ b/src/mongo/db/views/view_catalog.cpp
@@ -56,23 +56,23 @@
namespace mongo {
namespace {
-StatusWith<std::unique_ptr<CollatorInterface>> parseCollator(OperationContext* txn,
+StatusWith<std::unique_ptr<CollatorInterface>> parseCollator(OperationContext* opCtx,
BSONObj collationSpec) {
// If 'collationSpec' is empty, return the null collator, which represents the "simple"
// collation.
if (collationSpec.isEmpty()) {
return {nullptr};
}
- return CollatorFactoryInterface::get(txn->getServiceContext())->makeFromBSON(collationSpec);
+ return CollatorFactoryInterface::get(opCtx->getServiceContext())->makeFromBSON(collationSpec);
}
} // namespace
-Status ViewCatalog::reloadIfNeeded(OperationContext* txn) {
+Status ViewCatalog::reloadIfNeeded(OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
- return _reloadIfNeeded_inlock(txn);
+ return _reloadIfNeeded_inlock(opCtx);
}
-Status ViewCatalog::_reloadIfNeeded_inlock(OperationContext* txn) {
+Status ViewCatalog::_reloadIfNeeded_inlock(OperationContext* opCtx) {
if (_valid.load())
return Status::OK();
@@ -81,9 +81,9 @@ Status ViewCatalog::_reloadIfNeeded_inlock(OperationContext* txn) {
// Need to reload, first clear our cache.
_viewMap.clear();
- Status status = _durable->iterate(txn, [&](const BSONObj& view) -> Status {
+ Status status = _durable->iterate(opCtx, [&](const BSONObj& view) -> Status {
BSONObj collationSpec = view.hasField("collation") ? view["collation"].Obj() : BSONObj();
- auto collator = parseCollator(txn, collationSpec);
+ auto collator = parseCollator(opCtx, collationSpec);
if (!collator.isOK()) {
return collator.getStatus();
}
@@ -106,20 +106,20 @@ Status ViewCatalog::_reloadIfNeeded_inlock(OperationContext* txn) {
return status;
}
-void ViewCatalog::iterate(OperationContext* txn, ViewIteratorCallback callback) {
+void ViewCatalog::iterate(OperationContext* opCtx, ViewIteratorCallback callback) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
- _requireValidCatalog_inlock(txn);
+ _requireValidCatalog_inlock(opCtx);
for (auto&& view : _viewMap) {
callback(*view.second);
}
}
-Status ViewCatalog::_createOrUpdateView_inlock(OperationContext* txn,
+Status ViewCatalog::_createOrUpdateView_inlock(OperationContext* opCtx,
const NamespaceString& viewName,
const NamespaceString& viewOn,
const BSONArray& pipeline,
std::unique_ptr<CollatorInterface> collator) {
- _requireValidCatalog_inlock(txn);
+ _requireValidCatalog_inlock(opCtx);
// Build the BSON definition for this view to be saved in the durable view catalog. If the
// collation is empty, omit it from the definition altogether.
@@ -136,27 +136,27 @@ Status ViewCatalog::_createOrUpdateView_inlock(OperationContext* txn,
viewName.db(), viewName.coll(), viewOn.coll(), ownedPipeline, std::move(collator));
// Check that the resulting dependency graph is acyclic and within the maximum depth.
- Status graphStatus = _upsertIntoGraph(txn, *(view.get()));
+ Status graphStatus = _upsertIntoGraph(opCtx, *(view.get()));
if (!graphStatus.isOK()) {
return graphStatus;
}
- _durable->upsert(txn, viewName, viewDefBuilder.obj());
+ _durable->upsert(opCtx, viewName, viewDefBuilder.obj());
_viewMap[viewName.ns()] = view;
- txn->recoveryUnit()->onRollback([this, viewName]() {
+ opCtx->recoveryUnit()->onRollback([this, viewName]() {
this->_viewMap.erase(viewName.ns());
this->_viewGraphNeedsRefresh = true;
});
// We may get invalidated, but we're exclusively locked, so the change must be ours.
- txn->recoveryUnit()->onCommit([this]() { this->_valid.store(true); });
+ opCtx->recoveryUnit()->onCommit([this]() { this->_valid.store(true); });
return Status::OK();
}
-Status ViewCatalog::_upsertIntoGraph(OperationContext* txn, const ViewDefinition& viewDef) {
+Status ViewCatalog::_upsertIntoGraph(OperationContext* opCtx, const ViewDefinition& viewDef) {
// Performs the insert into the graph.
- auto doInsert = [this, &txn](const ViewDefinition& viewDef, bool needsValidation) -> Status {
+ auto doInsert = [this, &opCtx](const ViewDefinition& viewDef, bool needsValidation) -> Status {
// Make a LiteParsedPipeline to determine the namespaces referenced by this pipeline.
AggregationRequest request(viewDef.viewOn(), viewDef.pipeline());
const LiteParsedPipeline liteParsedPipeline(request);
@@ -171,7 +171,7 @@ Status ViewCatalog::_upsertIntoGraph(OperationContext* txn, const ViewDefinition
resolvedNamespaces[nss.coll()] = {nss, {}};
}
boost::intrusive_ptr<ExpressionContext> expCtx =
- new ExpressionContext(txn,
+ new ExpressionContext(opCtx,
request,
CollatorInterface::cloneCollator(viewDef.defaultCollator()),
std::move(resolvedNamespaces));
@@ -194,7 +194,7 @@ Status ViewCatalog::_upsertIntoGraph(OperationContext* txn, const ViewDefinition
if (needsValidation) {
// Check the collation of all the dependent namespaces before updating the graph.
- auto collationStatus = _validateCollation_inlock(txn, viewDef, refs);
+ auto collationStatus = _validateCollation_inlock(opCtx, viewDef, refs);
if (!collationStatus.isOK()) {
return collationStatus;
}
@@ -215,7 +215,7 @@ Status ViewCatalog::_upsertIntoGraph(OperationContext* txn, const ViewDefinition
}
}
// Only if the inserts completed without error will we no longer need a refresh.
- txn->recoveryUnit()->onRollback([this]() { this->_viewGraphNeedsRefresh = true; });
+ opCtx->recoveryUnit()->onRollback([this]() { this->_viewGraphNeedsRefresh = true; });
_viewGraphNeedsRefresh = false;
}
@@ -226,11 +226,11 @@ Status ViewCatalog::_upsertIntoGraph(OperationContext* txn, const ViewDefinition
return doInsert(viewDef, true);
}
-Status ViewCatalog::_validateCollation_inlock(OperationContext* txn,
+Status ViewCatalog::_validateCollation_inlock(OperationContext* opCtx,
const ViewDefinition& view,
const std::vector<NamespaceString>& refs) {
for (auto&& potentialViewNss : refs) {
- auto otherView = _lookup_inlock(txn, potentialViewNss.ns());
+ auto otherView = _lookup_inlock(opCtx, potentialViewNss.ns());
if (otherView &&
!CollatorInterface::collatorsMatch(view.defaultCollator(),
otherView->defaultCollator())) {
@@ -243,7 +243,7 @@ Status ViewCatalog::_validateCollation_inlock(OperationContext* txn,
return Status::OK();
}
-Status ViewCatalog::createView(OperationContext* txn,
+Status ViewCatalog::createView(OperationContext* opCtx,
const NamespaceString& viewName,
const NamespaceString& viewOn,
const BSONArray& pipeline,
@@ -262,7 +262,7 @@ Status ViewCatalog::createView(OperationContext* txn,
return Status(ErrorCodes::BadValue,
"View must be created on a view or collection in the same database");
- if (_lookup_inlock(txn, StringData(viewName.ns())))
+ if (_lookup_inlock(opCtx, StringData(viewName.ns())))
return Status(ErrorCodes::NamespaceExists, "Namespace already exists");
if (!NamespaceString::validCollectionName(viewOn.coll()))
@@ -274,15 +274,15 @@ Status ViewCatalog::createView(OperationContext* txn,
ErrorCodes::InvalidNamespace,
"View name cannot start with 'system.', which is reserved for system namespaces");
- auto collator = parseCollator(txn, collation);
+ auto collator = parseCollator(opCtx, collation);
if (!collator.isOK())
return collator.getStatus();
return _createOrUpdateView_inlock(
- txn, viewName, viewOn, pipeline, std::move(collator.getValue()));
+ opCtx, viewName, viewOn, pipeline, std::move(collator.getValue()));
}
-Status ViewCatalog::modifyView(OperationContext* txn,
+Status ViewCatalog::modifyView(OperationContext* opCtx,
const NamespaceString& viewName,
const NamespaceString& viewOn,
const BSONArray& pipeline) {
@@ -300,7 +300,7 @@ Status ViewCatalog::modifyView(OperationContext* txn,
return Status(ErrorCodes::BadValue,
"View must be created on a view or collection in the same database");
- auto viewPtr = _lookup_inlock(txn, viewName.ns());
+ auto viewPtr = _lookup_inlock(opCtx, viewName.ns());
if (!viewPtr)
return Status(ErrorCodes::NamespaceNotFound,
str::stream() << "cannot modify missing view " << viewName.ns());
@@ -310,24 +310,24 @@ Status ViewCatalog::modifyView(OperationContext* txn,
str::stream() << "invalid name for 'viewOn': " << viewOn.coll());
ViewDefinition savedDefinition = *viewPtr;
- txn->recoveryUnit()->onRollback([this, txn, viewName, savedDefinition]() {
+ opCtx->recoveryUnit()->onRollback([this, opCtx, viewName, savedDefinition]() {
this->_viewMap[viewName.ns()] = std::make_shared<ViewDefinition>(savedDefinition);
});
return _createOrUpdateView_inlock(
- txn,
+ opCtx,
viewName,
viewOn,
pipeline,
CollatorInterface::cloneCollator(savedDefinition.defaultCollator()));
}
-Status ViewCatalog::dropView(OperationContext* txn, const NamespaceString& viewName) {
+Status ViewCatalog::dropView(OperationContext* opCtx, const NamespaceString& viewName) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
- _requireValidCatalog_inlock(txn);
+ _requireValidCatalog_inlock(opCtx);
// Save a copy of the view definition in case we need to roll back.
- auto viewPtr = _lookup_inlock(txn, viewName.ns());
+ auto viewPtr = _lookup_inlock(opCtx, viewName.ns());
if (!viewPtr) {
return {ErrorCodes::NamespaceNotFound,
str::stream() << "cannot drop missing view: " << viewName.ns()};
@@ -336,20 +336,21 @@ Status ViewCatalog::dropView(OperationContext* txn, const NamespaceString& viewN
ViewDefinition savedDefinition = *viewPtr;
invariant(_valid.load());
- _durable->remove(txn, viewName);
+ _durable->remove(opCtx, viewName);
_viewGraph.remove(savedDefinition.name());
_viewMap.erase(viewName.ns());
- txn->recoveryUnit()->onRollback([this, txn, viewName, savedDefinition]() {
+ opCtx->recoveryUnit()->onRollback([this, opCtx, viewName, savedDefinition]() {
this->_viewGraphNeedsRefresh = true;
this->_viewMap[viewName.ns()] = std::make_shared<ViewDefinition>(savedDefinition);
});
// We may get invalidated, but we're exclusively locked, so the change must be ours.
- txn->recoveryUnit()->onCommit([this]() { this->_valid.store(true); });
+ opCtx->recoveryUnit()->onCommit([this]() { this->_valid.store(true); });
return Status::OK();
}
-std::shared_ptr<ViewDefinition> ViewCatalog::_lookup_inlock(OperationContext* txn, StringData ns) {
+std::shared_ptr<ViewDefinition> ViewCatalog::_lookup_inlock(OperationContext* opCtx,
+ StringData ns) {
// We expect the catalog to be valid, so short-circuit other checks for best performance.
if (MONGO_unlikely(!_valid.load())) {
// If the catalog is invalid, we want to avoid references to virtualized or other invalid
@@ -357,11 +358,11 @@ std::shared_ptr<ViewDefinition> ViewCatalog::_lookup_inlock(OperationContext* tx
// invalid view definitions.
if (!NamespaceString::validCollectionName(ns))
return nullptr;
- Status status = _reloadIfNeeded_inlock(txn);
+ Status status = _reloadIfNeeded_inlock(opCtx);
// In case of errors we've already logged a message. Only uassert if there actually is
// a user connection, as otherwise we'd crash the server. The catalog will remain invalid,
// and any views after the first invalid one are ignored.
- if (txn->getClient()->isFromUserConnection())
+ if (opCtx->getClient()->isFromUserConnection())
uassertStatusOK(status);
}
@@ -372,19 +373,19 @@ std::shared_ptr<ViewDefinition> ViewCatalog::_lookup_inlock(OperationContext* tx
return nullptr;
}
-std::shared_ptr<ViewDefinition> ViewCatalog::lookup(OperationContext* txn, StringData ns) {
+std::shared_ptr<ViewDefinition> ViewCatalog::lookup(OperationContext* opCtx, StringData ns) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
- return _lookup_inlock(txn, ns);
+ return _lookup_inlock(opCtx, ns);
}
-StatusWith<ResolvedView> ViewCatalog::resolveView(OperationContext* txn,
+StatusWith<ResolvedView> ViewCatalog::resolveView(OperationContext* opCtx,
const NamespaceString& nss) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
const NamespaceString* resolvedNss = &nss;
std::vector<BSONObj> resolvedPipeline;
for (int i = 0; i < ViewGraph::kMaxViewDepth; i++) {
- auto view = _lookup_inlock(txn, resolvedNss->ns());
+ auto view = _lookup_inlock(opCtx, resolvedNss->ns());
if (!view) {
// Return error status if pipeline is too large.
int pipelineSize = 0;
diff --git a/src/mongo/db/views/view_catalog.h b/src/mongo/db/views/view_catalog.h
index f0c6dac89ca..a8fea3692f4 100644
--- a/src/mongo/db/views/view_catalog.h
+++ b/src/mongo/db/views/view_catalog.h
@@ -70,7 +70,7 @@ public:
* executes under the catalog's mutex, so it must not access other methods of the catalog,
* acquire locks or run for a long time.
*/
- void iterate(OperationContext* txn, ViewIteratorCallback callback);
+ void iterate(OperationContext* opCtx, ViewIteratorCallback callback);
/**
* Create a new view 'viewName' with contents defined by running the specified aggregation
@@ -81,7 +81,7 @@ public:
*
* Must be in WriteUnitOfWork. View creation rolls back if the unit of work aborts.
*/
- Status createView(OperationContext* txn,
+ Status createView(OperationContext* opCtx,
const NamespaceString& viewName,
const NamespaceString& viewOn,
const BSONArray& pipeline,
@@ -92,14 +92,14 @@ public:
*
* Must be in WriteUnitOfWork. The drop rolls back if the unit of work aborts.
*/
- Status dropView(OperationContext* txn, const NamespaceString& viewName);
+ Status dropView(OperationContext* opCtx, const NamespaceString& viewName);
/**
* Modify the view named 'viewName' to have the new 'viewOn' and 'pipeline'.
*
* Must be in WriteUnitOfWork. The modification rolls back if the unit of work aborts.
*/
- Status modifyView(OperationContext* txn,
+ Status modifyView(OperationContext* opCtx,
const NamespaceString& viewName,
const NamespaceString& viewOn,
const BSONArray& pipeline);
@@ -108,14 +108,14 @@ public:
* Look up the 'nss' in the view catalog, returning a shared pointer to a View definition, or
* nullptr if it doesn't exist.
*/
- std::shared_ptr<ViewDefinition> lookup(OperationContext* txn, StringData nss);
+ std::shared_ptr<ViewDefinition> lookup(OperationContext* opCtx, StringData nss);
/**
* Resolve the views on 'nss', transforming the pipeline appropriately. This function returns a
* fully-resolved view definition containing the backing namespace, the resolved pipeline and
* the collation to use for the operation.
*/
- StatusWith<ResolvedView> resolveView(OperationContext* txn, const NamespaceString& nss);
+ StatusWith<ResolvedView> resolveView(OperationContext* opCtx, const NamespaceString& nss);
/**
* Reload the views catalog if marked invalid. No-op if already valid. Does only minimal
@@ -124,7 +124,7 @@ public:
* cycle detection etc. This is implicitly called by other methods when the ViewCatalog is
* marked invalid, and on first opening a database.
*/
- Status reloadIfNeeded(OperationContext* txn);
+ Status reloadIfNeeded(OperationContext* opCtx);
/**
* To be called when direct modifications to the DurableViewCatalog have been committed, so
@@ -136,7 +136,7 @@ public:
}
private:
- Status _createOrUpdateView_inlock(OperationContext* txn,
+ Status _createOrUpdateView_inlock(OperationContext* opCtx,
const NamespaceString& viewName,
const NamespaceString& viewOn,
const BSONArray& pipeline,
@@ -145,21 +145,21 @@ private:
* Parses the view definition pipeline, attempts to upsert into the view graph, and refreshes
* the graph if necessary. Returns an error status if the resulting graph would be invalid.
*/
- Status _upsertIntoGraph(OperationContext* txn, const ViewDefinition& viewDef);
+ Status _upsertIntoGraph(OperationContext* opCtx, const ViewDefinition& viewDef);
/**
* Returns Status::OK if each view namespace in 'refs' has the same default collation as 'view'.
* Otherwise, returns ErrorCodes::OptionNotSupportedOnView.
*/
- Status _validateCollation_inlock(OperationContext* txn,
+ Status _validateCollation_inlock(OperationContext* opCtx,
const ViewDefinition& view,
const std::vector<NamespaceString>& refs);
- std::shared_ptr<ViewDefinition> _lookup_inlock(OperationContext* txn, StringData ns);
- Status _reloadIfNeeded_inlock(OperationContext* txn);
+ std::shared_ptr<ViewDefinition> _lookup_inlock(OperationContext* opCtx, StringData ns);
+ Status _reloadIfNeeded_inlock(OperationContext* opCtx);
- void _requireValidCatalog_inlock(OperationContext* txn) {
- uassertStatusOK(_reloadIfNeeded_inlock(txn));
+ void _requireValidCatalog_inlock(OperationContext* opCtx) {
+ uassertStatusOK(_reloadIfNeeded_inlock(opCtx));
invariant(_valid.load());
}
diff --git a/src/mongo/db/views/view_catalog_test.cpp b/src/mongo/db/views/view_catalog_test.cpp
index 1a034533a58..d6303eb8070 100644
--- a/src/mongo/db/views/view_catalog_test.cpp
+++ b/src/mongo/db/views/view_catalog_test.cpp
@@ -66,14 +66,14 @@ public:
static const std::string name;
using Callback = stdx::function<Status(const BSONObj& view)>;
- virtual Status iterate(OperationContext* txn, Callback callback) {
+ virtual Status iterate(OperationContext* opCtx, Callback callback) {
++_iterateCount;
return Status::OK();
}
- virtual void upsert(OperationContext* txn, const NamespaceString& name, const BSONObj& view) {
+ virtual void upsert(OperationContext* opCtx, const NamespaceString& name, const BSONObj& view) {
++_upsertCount;
}
- virtual void remove(OperationContext* txn, const NamespaceString& name) {}
+ virtual void remove(OperationContext* opCtx, const NamespaceString& name) {}
virtual const std::string& getName() const {
return name;
};
diff --git a/src/mongo/db/write_concern.cpp b/src/mongo/db/write_concern.cpp
index 77a90f0e396..314ef6c05b6 100644
--- a/src/mongo/db/write_concern.cpp
+++ b/src/mongo/db/write_concern.cpp
@@ -66,13 +66,13 @@ bool commandSpecifiesWriteConcern(const BSONObj& cmdObj) {
return cmdObj.hasField(WriteConcernOptions::kWriteConcernField);
}
-StatusWith<WriteConcernOptions> extractWriteConcern(OperationContext* txn,
+StatusWith<WriteConcernOptions> extractWriteConcern(OperationContext* opCtx,
const BSONObj& cmdObj,
const std::string& dbName) {
// The default write concern if empty is {w:1}. Specifying {w:0} is/was allowed, but is
// interpreted identically to {w:1}.
auto wcResult = WriteConcernOptions::extractWCFromCommand(
- cmdObj, dbName, repl::ReplicationCoordinator::get(txn)->getGetLastErrorDefault());
+ cmdObj, dbName, repl::ReplicationCoordinator::get(opCtx)->getGetLastErrorDefault());
if (!wcResult.isOK()) {
return wcResult.getStatus();
}
@@ -81,14 +81,14 @@ StatusWith<WriteConcernOptions> extractWriteConcern(OperationContext* txn,
if (writeConcern.usedDefault) {
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer &&
- !txn->getClient()->isInDirectClient()) {
+ !opCtx->getClient()->isInDirectClient()) {
// This is here only for backwards compatibility with 3.2 clusters which have commands
// that do not specify write concern when writing to the config server.
writeConcern = {
WriteConcernOptions::kMajority, WriteConcernOptions::SyncMode::UNSET, Seconds(30)};
}
} else {
- Status wcStatus = validateWriteConcern(txn, writeConcern, dbName);
+ Status wcStatus = validateWriteConcern(opCtx, writeConcern, dbName);
if (!wcStatus.isOK()) {
return wcStatus;
}
@@ -97,11 +97,11 @@ StatusWith<WriteConcernOptions> extractWriteConcern(OperationContext* txn,
return writeConcern;
}
-Status validateWriteConcern(OperationContext* txn,
+Status validateWriteConcern(OperationContext* opCtx,
const WriteConcernOptions& writeConcern,
StringData dbName) {
if (writeConcern.syncMode == WriteConcernOptions::SyncMode::JOURNAL &&
- !txn->getServiceContext()->getGlobalStorageEngine()->isDurable()) {
+ !opCtx->getServiceContext()->getGlobalStorageEngine()->isDurable()) {
return Status(ErrorCodes::BadValue,
"cannot use 'j' option when a host does not have journaling enabled");
}
@@ -113,7 +113,7 @@ Status validateWriteConcern(OperationContext* txn,
dbName != NamespaceString::kLocalDb && !writeConcern.validForConfigServers()) {
// The only cases where we allow non-majority writes are from within the config servers
// themselves, because these wait for write concern explicitly.
- if (!txn->getClient()->isInDirectClient()) {
+ if (!opCtx->getClient()->isInDirectClient()) {
return {ErrorCodes::BadValue,
str::stream() << "w:'majority' is the only valid write concern when writing "
"to config servers, got: "
@@ -121,7 +121,7 @@ Status validateWriteConcern(OperationContext* txn,
}
}
- const auto replMode = repl::ReplicationCoordinator::get(txn)->getReplicationMode();
+ const auto replMode = repl::ReplicationCoordinator::get(opCtx)->getReplicationMode();
if (replMode == repl::ReplicationCoordinator::modeNone && writeConcern.wNumNodes > 1) {
return Status(ErrorCodes::BadValue, "cannot use 'w' > 1 when a host is not replicated");
@@ -175,13 +175,13 @@ void WriteConcernResult::appendTo(const WriteConcernOptions& writeConcern,
!result->asTempObj()["waited"].eoo()));
}
-Status waitForWriteConcern(OperationContext* txn,
+Status waitForWriteConcern(OperationContext* opCtx,
const OpTime& replOpTime,
const WriteConcernOptions& writeConcern,
WriteConcernResult* result) {
LOG(2) << "Waiting for write concern. OpTime: " << replOpTime
<< ", write concern: " << writeConcern.toBSON();
- auto replCoord = repl::ReplicationCoordinator::get(txn);
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangBeforeWaitingForWriteConcern);
@@ -199,10 +199,10 @@ Status waitForWriteConcern(OperationContext* txn,
case WriteConcernOptions::SyncMode::FSYNC: {
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
if (!storageEngine->isDurable()) {
- result->fsyncFiles = storageEngine->flushAllFiles(txn, true);
+ result->fsyncFiles = storageEngine->flushAllFiles(opCtx, true);
} else {
// We only need to commit the journal if we're durable
- txn->recoveryUnit()->waitUntilDurable();
+ opCtx->recoveryUnit()->waitUntilDurable();
}
break;
}
@@ -211,10 +211,10 @@ Status waitForWriteConcern(OperationContext* txn,
// Wait for ops to become durable then update replication system's
// knowledge of this.
OpTime appliedOpTime = replCoord->getMyLastAppliedOpTime();
- txn->recoveryUnit()->waitUntilDurable();
+ opCtx->recoveryUnit()->waitUntilDurable();
replCoord->setMyLastDurableOpTimeForward(appliedOpTime);
} else {
- txn->recoveryUnit()->waitUntilDurable();
+ opCtx->recoveryUnit()->waitUntilDurable();
}
break;
}
@@ -237,7 +237,7 @@ Status waitForWriteConcern(OperationContext* txn,
// Replica set stepdowns and gle mode changes are thrown as errors
repl::ReplicationCoordinator::StatusAndDuration replStatus =
- replCoord->awaitReplication(txn, replOpTime, writeConcernWithPopulatedSyncMode);
+ replCoord->awaitReplication(opCtx, replOpTime, writeConcernWithPopulatedSyncMode);
if (replStatus.status == ErrorCodes::WriteConcernFailed) {
gleWtimeouts.increment();
result->err = "timeout";
diff --git a/src/mongo/db/write_concern.h b/src/mongo/db/write_concern.h
index 13bae072b05..60c31e6ff26 100644
--- a/src/mongo/db/write_concern.h
+++ b/src/mongo/db/write_concern.h
@@ -52,14 +52,14 @@ bool commandSpecifiesWriteConcern(const BSONObj& cmdObj);
* Verifies that the writeConcern is of type Object (BSON type) and
* that the resulting writeConcern is valid for this particular host.
*/
-StatusWith<WriteConcernOptions> extractWriteConcern(OperationContext* txn,
+StatusWith<WriteConcernOptions> extractWriteConcern(OperationContext* opCtx,
const BSONObj& cmdObj,
const std::string& dbName);
/**
* Verifies that a WriteConcern is valid for this particular host and database.
*/
-Status validateWriteConcern(OperationContext* txn,
+Status validateWriteConcern(OperationContext* opCtx,
const WriteConcernOptions& writeConcern,
StringData dbName);
@@ -100,7 +100,7 @@ struct WriteConcernResult {
* Returns NotMaster if the host steps down while waiting for replication
* Returns UnknownReplWriteConcern if the wMode specified was not enforceable
*/
-Status waitForWriteConcern(OperationContext* txn,
+Status waitForWriteConcern(OperationContext* opCtx,
const repl::OpTime& replOpTime,
const WriteConcernOptions& writeConcern,
WriteConcernResult* result);
diff --git a/src/mongo/dbtests/clienttests.cpp b/src/mongo/dbtests/clienttests.cpp
index 9cb2d0b70c1..722618b9320 100644
--- a/src/mongo/dbtests/clienttests.cpp
+++ b/src/mongo/dbtests/clienttests.cpp
@@ -47,17 +47,17 @@ using std::vector;
class Base {
public:
Base(string coll) : _ns("test." + coll) {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient db(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient db(&opCtx);
db.dropDatabase("test");
}
virtual ~Base() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient db(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient db(&opCtx);
db.dropCollection(_ns);
}
@@ -74,20 +74,20 @@ class DropIndex : public Base {
public:
DropIndex() : Base("dropindex") {}
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient db(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient db(&opCtx);
db.insert(ns(), BSON("x" << 2));
ASSERT_EQUALS(1u, db.getIndexSpecs(ns()).size());
- ASSERT_OK(dbtests::createIndex(&txn, ns(), BSON("x" << 1)));
+ ASSERT_OK(dbtests::createIndex(&opCtx, ns(), BSON("x" << 1)));
ASSERT_EQUALS(2u, db.getIndexSpecs(ns()).size());
db.dropIndex(ns(), BSON("x" << 1));
ASSERT_EQUALS(1u, db.getIndexSpecs(ns()).size());
- ASSERT_OK(dbtests::createIndex(&txn, ns(), BSON("x" << 1)));
+ ASSERT_OK(dbtests::createIndex(&opCtx, ns(), BSON("x" << 1)));
ASSERT_EQUALS(2u, db.getIndexSpecs(ns()).size());
db.dropIndexes(ns());
@@ -104,11 +104,11 @@ class BuildIndex : public Base {
public:
BuildIndex() : Base("buildIndex") {}
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
- OldClientWriteContext ctx(&txn, ns());
- DBDirectClient db(&txn);
+ OldClientWriteContext ctx(&opCtx, ns());
+ DBDirectClient db(&opCtx);
db.insert(ns(), BSON("x" << 1 << "y" << 2));
db.insert(ns(), BSON("x" << 2 << "y" << 2));
@@ -117,19 +117,19 @@ public:
ASSERT(collection);
IndexCatalog* indexCatalog = collection->getIndexCatalog();
- ASSERT_EQUALS(1, indexCatalog->numIndexesReady(&txn));
+ ASSERT_EQUALS(1, indexCatalog->numIndexesReady(&opCtx));
// _id index
ASSERT_EQUALS(1U, db.getIndexSpecs(ns()).size());
ASSERT_EQUALS(ErrorCodes::DuplicateKey,
- dbtests::createIndex(&txn, ns(), BSON("y" << 1), true));
+ dbtests::createIndex(&opCtx, ns(), BSON("y" << 1), true));
- ASSERT_EQUALS(1, indexCatalog->numIndexesReady(&txn));
+ ASSERT_EQUALS(1, indexCatalog->numIndexesReady(&opCtx));
ASSERT_EQUALS(1U, db.getIndexSpecs(ns()).size());
- ASSERT_OK(dbtests::createIndex(&txn, ns(), BSON("x" << 1), true));
+ ASSERT_OK(dbtests::createIndex(&opCtx, ns(), BSON("x" << 1), true));
- ASSERT_EQUALS(2, indexCatalog->numIndexesReady(&txn));
+ ASSERT_EQUALS(2, indexCatalog->numIndexesReady(&opCtx));
ASSERT_EQUALS(2U, db.getIndexSpecs(ns()).size());
}
};
@@ -138,16 +138,16 @@ class CS_10 : public Base {
public:
CS_10() : Base("CS_10") {}
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient db(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient db(&opCtx);
const string longs(770, 'c');
for (int i = 0; i < 1111; ++i) {
db.insert(ns(), BSON("a" << i << "b" << longs));
}
- ASSERT_OK(dbtests::createIndex(&txn, ns(), BSON("a" << 1 << "b" << 1)));
+ ASSERT_OK(dbtests::createIndex(&opCtx, ns(), BSON("a" << 1 << "b" << 1)));
unique_ptr<DBClientCursor> c = db.query(ns(), Query().sort(BSON("a" << 1 << "b" << 1)));
ASSERT_EQUALS(1111, c->itcount());
@@ -158,9 +158,9 @@ class PushBack : public Base {
public:
PushBack() : Base("PushBack") {}
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient db(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient db(&opCtx);
for (int i = 0; i < 10; ++i) {
db.insert(ns(), BSON("i" << i));
@@ -204,9 +204,9 @@ class Create : public Base {
public:
Create() : Base("Create") {}
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient db(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient db(&opCtx);
db.createCollection("unittests.clienttests.create", 4096, true);
BSONObj info;
@@ -237,9 +237,9 @@ class CreateSimpleV1Index : public Base {
public:
CreateSimpleV1Index() : Base("CreateSimpleV1Index") {}
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient db(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient db(&opCtx);
db.createIndex(ns(), IndexSpec().addKey("aField").version(1));
}
@@ -249,9 +249,9 @@ class CreateSimpleNamedV1Index : public Base {
public:
CreateSimpleNamedV1Index() : Base("CreateSimpleNamedV1Index") {}
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient db(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient db(&opCtx);
db.createIndex(ns(), IndexSpec().addKey("aField").version(1).name("aFieldV1Index"));
}
@@ -261,9 +261,9 @@ class CreateCompoundNamedV1Index : public Base {
public:
CreateCompoundNamedV1Index() : Base("CreateCompoundNamedV1Index") {}
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient db(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient db(&opCtx);
db.createIndex(ns(),
IndexSpec()
@@ -279,9 +279,9 @@ public:
CreateUniqueSparseDropDupsIndexInBackground()
: Base("CreateUniqueSparseDropDupsIndexInBackground") {}
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient db(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient db(&opCtx);
db.createIndex(
ns(), IndexSpec().addKey("aField").background().unique().sparse().dropDuplicates());
@@ -292,9 +292,9 @@ class CreateComplexTextIndex : public Base {
public:
CreateComplexTextIndex() : Base("CreateComplexTextIndex") {}
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient db(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient db(&opCtx);
db.createIndex(ns(),
IndexSpec()
@@ -311,9 +311,9 @@ class Create2DIndex : public Base {
public:
Create2DIndex() : Base("Create2DIndex") {}
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient db(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient db(&opCtx);
db.createIndex(ns(),
IndexSpec()
@@ -328,9 +328,9 @@ class CreateHaystackIndex : public Base {
public:
CreateHaystackIndex() : Base("CreateHaystackIndex") {}
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient db(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient db(&opCtx);
db.createIndex(ns(),
IndexSpec()
@@ -344,9 +344,9 @@ class Create2DSphereIndex : public Base {
public:
Create2DSphereIndex() : Base("Create2DSphereIndex") {}
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient db(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient db(&opCtx);
db.createIndex(ns(),
IndexSpec()
@@ -359,9 +359,9 @@ class CreateHashedIndex : public Base {
public:
CreateHashedIndex() : Base("CreateHashedIndex") {}
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient db(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient db(&opCtx);
db.createIndex(ns(), IndexSpec().addKey("aField", IndexSpec::kIndexTypeHashed));
}
@@ -371,9 +371,9 @@ class CreateIndexFailure : public Base {
public:
CreateIndexFailure() : Base("CreateIndexFailure") {}
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient db(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient db(&opCtx);
db.createIndex(ns(), IndexSpec().addKey("aField"));
ASSERT_THROWS(db.createIndex(ns(), IndexSpec().addKey("aField").unique()), UserException);
diff --git a/src/mongo/dbtests/commandtests.cpp b/src/mongo/dbtests/commandtests.cpp
index a310184db78..a2fe4911e02 100644
--- a/src/mongo/dbtests/commandtests.cpp
+++ b/src/mongo/dbtests/commandtests.cpp
@@ -47,7 +47,7 @@ using std::string;
*/
class Base {
public:
- Base() : db(&_txn) {
+ Base() : db(&_opCtx) {
db.dropCollection(ns());
}
@@ -62,16 +62,16 @@ public:
}
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
DBDirectClient db;
};
// one namespace per command
namespace FileMD5 {
struct Base {
- Base() : db(&_txn) {
+ Base() : db(&_opCtx) {
db.dropCollection(ns());
- ASSERT_OK(dbtests::createIndex(&_txn, ns(), BSON("files_id" << 1 << "n" << 1)));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), BSON("files_id" << 1 << "n" << 1)));
}
const char* ns() {
@@ -79,7 +79,7 @@ struct Base {
}
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
DBDirectClient db;
};
struct Type0 : Base {
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
index 689b87fe599..e20410a3f6c 100644
--- a/src/mongo/dbtests/counttests.cpp
+++ b/src/mongo/dbtests/counttests.cpp
@@ -51,19 +51,19 @@ const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2;
class Base {
public:
Base()
- : _scopedXact(&_txn, MODE_IX),
- _lk(_txn.lockState(), nsToDatabaseSubstring(ns()), MODE_X),
- _context(&_txn, ns()),
- _client(&_txn) {
+ : _scopedXact(&_opCtx, MODE_IX),
+ _lk(_opCtx.lockState(), nsToDatabaseSubstring(ns()), MODE_X),
+ _context(&_opCtx, ns()),
+ _client(&_opCtx) {
_database = _context.db();
{
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
_collection = _database->getCollection(ns());
if (_collection) {
- _database->dropCollection(&_txn, ns());
+ _database->dropCollection(&_opCtx, ns());
}
- _collection = _database->createCollection(&_txn, ns());
+ _collection = _database->createCollection(&_opCtx, ns());
wunit.commit();
}
@@ -71,8 +71,8 @@ public:
}
~Base() {
try {
- WriteUnitOfWork wunit(&_txn);
- uassertStatusOK(_database->dropCollection(&_txn, ns()));
+ WriteUnitOfWork wunit(&_opCtx);
+ uassertStatusOK(_database->dropCollection(&_opCtx, ns()));
wunit.commit();
} catch (...) {
FAIL("Exception while cleaning up collection");
@@ -85,7 +85,7 @@ protected:
}
void addIndex(const BSONObj& key) {
- Helpers::ensureIndex(&_txn,
+ Helpers::ensureIndex(&_opCtx,
_collection,
key,
kIndexVersion,
@@ -94,7 +94,7 @@ protected:
}
void insert(const char* s) {
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
const BSONObj o = fromjson(s);
OpDebug* const nullOpDebug = nullptr;
@@ -104,16 +104,16 @@ protected:
oid.init();
b.appendOID("_id", &oid);
b.appendElements(o);
- _collection->insertDocument(&_txn, b.obj(), nullOpDebug, false);
+ _collection->insertDocument(&_opCtx, b.obj(), nullOpDebug, false);
} else {
- _collection->insertDocument(&_txn, o, nullOpDebug, false);
+ _collection->insertDocument(&_opCtx, o, nullOpDebug, false);
}
wunit.commit();
}
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
ScopedTransaction _scopedXact;
Lock::DBLock _lk;
diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp
index fd0835ea4a2..0d2255ebea9 100644
--- a/src/mongo/dbtests/dbhelper_tests.cpp
+++ b/src/mongo/dbtests/dbhelper_tests.cpp
@@ -60,9 +60,9 @@ public:
RemoveRange() : _min(4), _max(8) {}
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient client(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient client(&opCtx);
for (int i = 0; i < 10; ++i) {
client.insert(ns, BSON("_id" << i));
@@ -70,18 +70,18 @@ public:
{
// Remove _id range [_min, _max).
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(ns), MODE_X);
- OldClientContext ctx(&txn, ns);
+ ScopedTransaction transaction(&opCtx, MODE_IX);
+ Lock::DBLock lk(opCtx.lockState(), nsToDatabaseSubstring(ns), MODE_X);
+ OldClientContext ctx(&opCtx, ns);
KeyRange range(ns, BSON("_id" << _min), BSON("_id" << _max), BSON("_id" << 1));
mongo::WriteConcernOptions dummyWriteConcern;
Helpers::removeRange(
- &txn, range, BoundInclusion::kIncludeStartKeyOnly, dummyWriteConcern);
+ &opCtx, range, BoundInclusion::kIncludeStartKeyOnly, dummyWriteConcern);
}
// Check that the expected documents remain.
- ASSERT_BSONOBJ_EQ(expected(), docs(&txn));
+ ASSERT_BSONOBJ_EQ(expected(), docs(&opCtx));
}
private:
@@ -96,8 +96,8 @@ private:
return bab.arr();
}
- BSONArray docs(OperationContext* txn) const {
- DBDirectClient client(txn);
+ BSONArray docs(OperationContext* opCtx) const {
+ DBDirectClient client(opCtx);
unique_ptr<DBClientCursor> cursor = client.query(ns, Query().hint(BSON("_id" << 1)));
BSONArrayBuilder bab;
while (cursor->more()) {
diff --git a/src/mongo/dbtests/dbtests.cpp b/src/mongo/dbtests/dbtests.cpp
index 75883c53091..586ecb3ea13 100644
--- a/src/mongo/dbtests/dbtests.cpp
+++ b/src/mongo/dbtests/dbtests.cpp
@@ -80,7 +80,7 @@ void initWireSpec() {
spec.outgoing.maxWireVersion = COMMANDS_ACCEPT_WRITE_CONCERN;
}
-Status createIndex(OperationContext* txn, StringData ns, const BSONObj& keys, bool unique) {
+Status createIndex(OperationContext* opCtx, StringData ns, const BSONObj& keys, bool unique) {
BSONObjBuilder specBuilder;
specBuilder.append("name", DBClientBase::genIndexName(keys));
specBuilder.append("ns", ns);
@@ -89,19 +89,19 @@ Status createIndex(OperationContext* txn, StringData ns, const BSONObj& keys, bo
if (unique) {
specBuilder.appendBool("unique", true);
}
- return createIndexFromSpec(txn, ns, specBuilder.done());
+ return createIndexFromSpec(opCtx, ns, specBuilder.done());
}
-Status createIndexFromSpec(OperationContext* txn, StringData ns, const BSONObj& spec) {
- AutoGetOrCreateDb autoDb(txn, nsToDatabaseSubstring(ns), MODE_X);
+Status createIndexFromSpec(OperationContext* opCtx, StringData ns, const BSONObj& spec) {
+ AutoGetOrCreateDb autoDb(opCtx, nsToDatabaseSubstring(ns), MODE_X);
Collection* coll;
{
- WriteUnitOfWork wunit(txn);
- coll = autoDb.getDb()->getOrCreateCollection(txn, ns);
+ WriteUnitOfWork wunit(opCtx);
+ coll = autoDb.getDb()->getOrCreateCollection(opCtx, ns);
invariant(coll);
wunit.commit();
}
- MultiIndexBlock indexer(txn, coll);
+ MultiIndexBlock indexer(opCtx, coll);
Status status = indexer.init(spec).getStatus();
if (status == ErrorCodes::IndexAlreadyExists) {
return Status::OK();
@@ -113,7 +113,7 @@ Status createIndexFromSpec(OperationContext* txn, StringData ns, const BSONObj&
if (!status.isOK()) {
return status;
}
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
indexer.commit();
wunit.commit();
return Status::OK();
diff --git a/src/mongo/dbtests/dbtests.h b/src/mongo/dbtests/dbtests.h
index 86de296e53d..68b7b0825fa 100644
--- a/src/mongo/dbtests/dbtests.h
+++ b/src/mongo/dbtests/dbtests.h
@@ -48,11 +48,14 @@ namespace dbtests {
/**
* Creates an index if it does not already exist.
*/
-Status createIndex(OperationContext* txn, StringData ns, const BSONObj& keys, bool unique = false);
+Status createIndex(OperationContext* opCtx,
+ StringData ns,
+ const BSONObj& keys,
+ bool unique = false);
/**
* Creates an index from a BSON spec, if it does not already exist.
*/
-Status createIndexFromSpec(OperationContext* txn, StringData ns, const BSONObj& spec);
+Status createIndexFromSpec(OperationContext* opCtx, StringData ns, const BSONObj& spec);
} // namespace dbtests
} // namespace mongo
diff --git a/src/mongo/dbtests/directclienttests.cpp b/src/mongo/dbtests/directclienttests.cpp
index 8412ba0c1e4..b9b06e81892 100644
--- a/src/mongo/dbtests/directclienttests.cpp
+++ b/src/mongo/dbtests/directclienttests.cpp
@@ -61,9 +61,9 @@ const char* ns = "a.b";
class Capped : public ClientBase {
public:
virtual void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient client(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient client(&opCtx);
for (int pass = 0; pass < 3; pass++) {
client.createCollection(ns, 1024 * 1024, true, 999);
for (int j = 0; j < pass * 3; j++)
@@ -92,9 +92,9 @@ public:
class InsertMany : ClientBase {
public:
virtual void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient client(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient client(&opCtx);
vector<BSONObj> objs;
objs.push_back(BSON("_id" << 1));
@@ -117,9 +117,9 @@ public:
class BadNSCmd : ClientBase {
public:
virtual void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient client(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient client(&opCtx);
BSONObj result;
BSONObj cmdObj = BSON("count"
@@ -131,9 +131,9 @@ public:
class BadNSQuery : ClientBase {
public:
virtual void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient client(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient client(&opCtx);
unique_ptr<DBClientCursor> cursor = client.query("", Query(), 1);
ASSERT(cursor->more());
@@ -146,9 +146,9 @@ public:
class BadNSGetMore : ClientBase {
public:
virtual void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient client(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient client(&opCtx);
unique_ptr<DBClientCursor> cursor = client.getMore("", 1, 1);
ASSERT(cursor->more());
@@ -161,9 +161,9 @@ public:
class BadNSInsert : ClientBase {
public:
virtual void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient client(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient client(&opCtx);
client.insert("", BSONObj(), 0);
ASSERT(!client.getLastError().empty());
@@ -173,9 +173,9 @@ public:
class BadNSUpdate : ClientBase {
public:
virtual void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient client(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient client(&opCtx);
client.update("", Query(), BSON("$set" << BSON("x" << 1)));
ASSERT(!client.getLastError().empty());
@@ -185,9 +185,9 @@ public:
class BadNSRemove : ClientBase {
public:
virtual void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient client(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient client(&opCtx);
client.remove("", Query());
ASSERT(!client.getLastError().empty());
diff --git a/src/mongo/dbtests/extensions_callback_real_test.cpp b/src/mongo/dbtests/extensions_callback_real_test.cpp
index 60b1c662f79..7ea4e6a4af0 100644
--- a/src/mongo/dbtests/extensions_callback_real_test.cpp
+++ b/src/mongo/dbtests/extensions_callback_real_test.cpp
@@ -49,45 +49,45 @@ public:
ExtensionsCallbackRealTest() : _nss("unittests.extensions_callback_real_test") {}
void setUp() final {
- AutoGetOrCreateDb autoDb(&_txn, _nss.db(), MODE_X);
+ AutoGetOrCreateDb autoDb(&_opCtx, _nss.db(), MODE_X);
Database* database = autoDb.getDb();
{
- WriteUnitOfWork wunit(&_txn);
- ASSERT(database->createCollection(&_txn, _nss.ns()));
+ WriteUnitOfWork wunit(&_opCtx);
+ ASSERT(database->createCollection(&_opCtx, _nss.ns()));
wunit.commit();
}
}
void tearDown() final {
- AutoGetDb autoDb(&_txn, _nss.db(), MODE_X);
+ AutoGetDb autoDb(&_opCtx, _nss.db(), MODE_X);
Database* database = autoDb.getDb();
if (!database) {
return;
}
{
- WriteUnitOfWork wunit(&_txn);
- static_cast<void>(database->dropCollection(&_txn, _nss.ns()));
+ WriteUnitOfWork wunit(&_opCtx);
+ static_cast<void>(database->dropCollection(&_opCtx, _nss.ns()));
wunit.commit();
}
}
protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
const NamespaceString _nss;
};
TEST_F(ExtensionsCallbackRealTest, TextNoIndex) {
BSONObj query = fromjson("{$text: {$search:\"awesome\"}}");
StatusWithMatchExpression result =
- ExtensionsCallbackReal(&_txn, &_nss).parseText(query.firstElement());
+ ExtensionsCallbackReal(&_opCtx, &_nss).parseText(query.firstElement());
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(ErrorCodes::IndexNotFound, result.getStatus());
}
TEST_F(ExtensionsCallbackRealTest, TextBasic) {
- ASSERT_OK(dbtests::createIndex(&_txn,
+ ASSERT_OK(dbtests::createIndex(&_opCtx,
_nss.ns(),
BSON("a"
<< "text"),
@@ -95,7 +95,7 @@ TEST_F(ExtensionsCallbackRealTest, TextBasic) {
BSONObj query = fromjson("{$text: {$search:\"awesome\", $language:\"english\"}}");
auto expr =
- unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseText(query.firstElement()));
+ unittest::assertGet(ExtensionsCallbackReal(&_opCtx, &_nss).parseText(query.firstElement()));
ASSERT_EQUALS(MatchExpression::TEXT, expr->matchType());
std::unique_ptr<TextMatchExpression> textExpr(
@@ -109,7 +109,7 @@ TEST_F(ExtensionsCallbackRealTest, TextBasic) {
}
TEST_F(ExtensionsCallbackRealTest, TextLanguageError) {
- ASSERT_OK(dbtests::createIndex(&_txn,
+ ASSERT_OK(dbtests::createIndex(&_opCtx,
_nss.ns(),
BSON("a"
<< "text"),
@@ -117,13 +117,13 @@ TEST_F(ExtensionsCallbackRealTest, TextLanguageError) {
BSONObj query = fromjson("{$text: {$search:\"awesome\", $language:\"spanglish\"}}");
StatusWithMatchExpression result =
- ExtensionsCallbackReal(&_txn, &_nss).parseText(query.firstElement());
+ ExtensionsCallbackReal(&_opCtx, &_nss).parseText(query.firstElement());
ASSERT_NOT_OK(result.getStatus());
}
TEST_F(ExtensionsCallbackRealTest, TextCaseSensitiveTrue) {
- ASSERT_OK(dbtests::createIndex(&_txn,
+ ASSERT_OK(dbtests::createIndex(&_opCtx,
_nss.ns(),
BSON("a"
<< "text"),
@@ -131,7 +131,7 @@ TEST_F(ExtensionsCallbackRealTest, TextCaseSensitiveTrue) {
BSONObj query = fromjson("{$text: {$search:\"awesome\", $caseSensitive: true}}");
auto expr =
- unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseText(query.firstElement()));
+ unittest::assertGet(ExtensionsCallbackReal(&_opCtx, &_nss).parseText(query.firstElement()));
ASSERT_EQUALS(MatchExpression::TEXT, expr->matchType());
std::unique_ptr<TextMatchExpression> textExpr(
@@ -140,7 +140,7 @@ TEST_F(ExtensionsCallbackRealTest, TextCaseSensitiveTrue) {
}
TEST_F(ExtensionsCallbackRealTest, TextCaseSensitiveFalse) {
- ASSERT_OK(dbtests::createIndex(&_txn,
+ ASSERT_OK(dbtests::createIndex(&_opCtx,
_nss.ns(),
BSON("a"
<< "text"),
@@ -148,7 +148,7 @@ TEST_F(ExtensionsCallbackRealTest, TextCaseSensitiveFalse) {
BSONObj query = fromjson("{$text: {$search:\"awesome\", $caseSensitive: false}}");
auto expr =
- unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseText(query.firstElement()));
+ unittest::assertGet(ExtensionsCallbackReal(&_opCtx, &_nss).parseText(query.firstElement()));
ASSERT_EQUALS(MatchExpression::TEXT, expr->matchType());
std::unique_ptr<TextMatchExpression> textExpr(
@@ -157,7 +157,7 @@ TEST_F(ExtensionsCallbackRealTest, TextCaseSensitiveFalse) {
}
TEST_F(ExtensionsCallbackRealTest, TextCaseSensitiveError) {
- ASSERT_OK(dbtests::createIndex(&_txn,
+ ASSERT_OK(dbtests::createIndex(&_opCtx,
_nss.ns(),
BSON("a"
<< "text"),
@@ -165,13 +165,13 @@ TEST_F(ExtensionsCallbackRealTest, TextCaseSensitiveError) {
BSONObj query = fromjson("{$text:{$search:\"awesome\", $caseSensitive: 0}}");
StatusWithMatchExpression result =
- ExtensionsCallbackReal(&_txn, &_nss).parseText(query.firstElement());
+ ExtensionsCallbackReal(&_opCtx, &_nss).parseText(query.firstElement());
ASSERT_NOT_OK(result.getStatus());
}
TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveTrue) {
- ASSERT_OK(dbtests::createIndex(&_txn,
+ ASSERT_OK(dbtests::createIndex(&_opCtx,
_nss.ns(),
BSON("a"
<< "text"),
@@ -179,7 +179,7 @@ TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveTrue) {
BSONObj query = fromjson("{$text: {$search:\"awesome\", $diacriticSensitive: true}}");
auto expr =
- unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseText(query.firstElement()));
+ unittest::assertGet(ExtensionsCallbackReal(&_opCtx, &_nss).parseText(query.firstElement()));
ASSERT_EQUALS(MatchExpression::TEXT, expr->matchType());
std::unique_ptr<TextMatchExpression> textExpr(
@@ -188,7 +188,7 @@ TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveTrue) {
}
TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveFalse) {
- ASSERT_OK(dbtests::createIndex(&_txn,
+ ASSERT_OK(dbtests::createIndex(&_opCtx,
_nss.ns(),
BSON("a"
<< "text"),
@@ -196,7 +196,7 @@ TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveFalse) {
BSONObj query = fromjson("{$text: {$search:\"awesome\", $diacriticSensitive: false}}");
auto expr =
- unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseText(query.firstElement()));
+ unittest::assertGet(ExtensionsCallbackReal(&_opCtx, &_nss).parseText(query.firstElement()));
ASSERT_EQUALS(MatchExpression::TEXT, expr->matchType());
std::unique_ptr<TextMatchExpression> textExpr(
@@ -205,7 +205,7 @@ TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveFalse) {
}
TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveError) {
- ASSERT_OK(dbtests::createIndex(&_txn,
+ ASSERT_OK(dbtests::createIndex(&_opCtx,
_nss.ns(),
BSON("a"
<< "text"),
@@ -213,13 +213,13 @@ TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveError) {
BSONObj query = fromjson("{$text:{$search:\"awesome\", $diacriticSensitive: 0}}");
StatusWithMatchExpression result =
- ExtensionsCallbackReal(&_txn, &_nss).parseText(query.firstElement());
+ ExtensionsCallbackReal(&_opCtx, &_nss).parseText(query.firstElement());
ASSERT_NOT_OK(result.getStatus());
}
TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveAndCaseSensitiveTrue) {
- ASSERT_OK(dbtests::createIndex(&_txn,
+ ASSERT_OK(dbtests::createIndex(&_opCtx,
_nss.ns(),
BSON("a"
<< "text"),
@@ -228,7 +228,7 @@ TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveAndCaseSensitiveTrue) {
BSONObj query =
fromjson("{$text: {$search:\"awesome\", $diacriticSensitive: true, $caseSensitive: true}}");
auto expr =
- unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseText(query.firstElement()));
+ unittest::assertGet(ExtensionsCallbackReal(&_opCtx, &_nss).parseText(query.firstElement()));
ASSERT_EQUALS(MatchExpression::TEXT, expr->matchType());
std::unique_ptr<TextMatchExpression> textExpr(
@@ -245,14 +245,14 @@ TEST_F(ExtensionsCallbackRealTest, WhereExpressionsWithSameScopeHaveSameBSONRepr
const char code[] = "function(){ return a; }";
BSONObj query1 = BSON("$where" << BSONCodeWScope(code, BSON("a" << true)));
- auto expr1 =
- unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseWhere(query1.firstElement()));
+ auto expr1 = unittest::assertGet(
+ ExtensionsCallbackReal(&_opCtx, &_nss).parseWhere(query1.firstElement()));
BSONObjBuilder builder1;
expr1->serialize(&builder1);
BSONObj query2 = BSON("$where" << BSONCodeWScope(code, BSON("a" << true)));
- auto expr2 =
- unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseWhere(query2.firstElement()));
+ auto expr2 = unittest::assertGet(
+ ExtensionsCallbackReal(&_opCtx, &_nss).parseWhere(query2.firstElement()));
BSONObjBuilder builder2;
expr2->serialize(&builder2);
@@ -264,14 +264,14 @@ TEST_F(ExtensionsCallbackRealTest,
const char code[] = "function(){ return a; }";
BSONObj query1 = BSON("$where" << BSONCodeWScope(code, BSON("a" << true)));
- auto expr1 =
- unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseWhere(query1.firstElement()));
+ auto expr1 = unittest::assertGet(
+ ExtensionsCallbackReal(&_opCtx, &_nss).parseWhere(query1.firstElement()));
BSONObjBuilder builder1;
expr1->serialize(&builder1);
BSONObj query2 = BSON("$where" << BSONCodeWScope(code, BSON("a" << false)));
- auto expr2 =
- unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseWhere(query2.firstElement()));
+ auto expr2 = unittest::assertGet(
+ ExtensionsCallbackReal(&_opCtx, &_nss).parseWhere(query2.firstElement()));
BSONObjBuilder builder2;
expr2->serialize(&builder2);
@@ -282,12 +282,12 @@ TEST_F(ExtensionsCallbackRealTest, WhereExpressionsWithSameScopeAreEquivalent) {
const char code[] = "function(){ return a; }";
BSONObj query1 = BSON("$where" << BSONCodeWScope(code, BSON("a" << true)));
- auto expr1 =
- unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseWhere(query1.firstElement()));
+ auto expr1 = unittest::assertGet(
+ ExtensionsCallbackReal(&_opCtx, &_nss).parseWhere(query1.firstElement()));
BSONObj query2 = BSON("$where" << BSONCodeWScope(code, BSON("a" << true)));
- auto expr2 =
- unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseWhere(query2.firstElement()));
+ auto expr2 = unittest::assertGet(
+ ExtensionsCallbackReal(&_opCtx, &_nss).parseWhere(query2.firstElement()));
ASSERT(expr1->equivalent(expr2.get()));
ASSERT(expr2->equivalent(expr1.get()));
@@ -297,12 +297,12 @@ TEST_F(ExtensionsCallbackRealTest, WhereExpressionsWithDifferentScopesAreNotEqui
const char code[] = "function(){ return a; }";
BSONObj query1 = BSON("$where" << BSONCodeWScope(code, BSON("a" << true)));
- auto expr1 =
- unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseWhere(query1.firstElement()));
+ auto expr1 = unittest::assertGet(
+ ExtensionsCallbackReal(&_opCtx, &_nss).parseWhere(query1.firstElement()));
BSONObj query2 = BSON("$where" << BSONCodeWScope(code, BSON("a" << false)));
- auto expr2 =
- unittest::assertGet(ExtensionsCallbackReal(&_txn, &_nss).parseWhere(query2.firstElement()));
+ auto expr2 = unittest::assertGet(
+ ExtensionsCallbackReal(&_opCtx, &_nss).parseWhere(query2.firstElement()));
ASSERT_FALSE(expr1->equivalent(expr2.get()));
ASSERT_FALSE(expr2->equivalent(expr1.get()));
diff --git a/src/mongo/dbtests/framework.cpp b/src/mongo/dbtests/framework.cpp
index 68537ee5705..5a6e6099150 100644
--- a/src/mongo/dbtests/framework.cpp
+++ b/src/mongo/dbtests/framework.cpp
@@ -82,8 +82,8 @@ int runDbTests(int argc, char** argv) {
// DBTests run as if in the database, so allow them to create direct clients.
DBDirectClientFactory::get(globalServiceContext)
- .registerImplementation([](OperationContext* txn) {
- return std::unique_ptr<DBClientBase>(new DBDirectClient(txn));
+ .registerImplementation([](OperationContext* opCtx) {
+ return std::unique_ptr<DBClientBase>(new DBDirectClient(opCtx));
});
srand((unsigned)frameworkGlobalParams.seed);
diff --git a/src/mongo/dbtests/gle_test.cpp b/src/mongo/dbtests/gle_test.cpp
index 75233fb048d..1a3b1f9d2a5 100644
--- a/src/mongo/dbtests/gle_test.cpp
+++ b/src/mongo/dbtests/gle_test.cpp
@@ -50,9 +50,9 @@ static const char* const _ns = "unittests.gle";
class GetLastErrorCommandFailure {
public:
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient client(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient client(&opCtx);
client.insert(_ns,
BSON("test"
@@ -70,9 +70,9 @@ public:
class GetLastErrorClean {
public:
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient client(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient client(&opCtx);
client.insert(_ns,
BSON("test"
@@ -90,9 +90,9 @@ public:
class GetLastErrorFromDup {
public:
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient client(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient client(&opCtx);
client.insert(_ns, BSON("_id" << 1));
diff --git a/src/mongo/dbtests/indexcatalogtests.cpp b/src/mongo/dbtests/indexcatalogtests.cpp
index 068ede905a1..fd1f10f8a55 100644
--- a/src/mongo/dbtests/indexcatalogtests.cpp
+++ b/src/mongo/dbtests/indexcatalogtests.cpp
@@ -38,44 +38,44 @@ static const char* const _ns = "unittests.indexcatalog";
class IndexIteratorTests {
public:
IndexIteratorTests() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
- OldClientContext ctx(&txn, _ns);
- WriteUnitOfWork wuow(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ ScopedTransaction transaction(&opCtx, MODE_IX);
+ Lock::DBLock lk(opCtx.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
+ OldClientContext ctx(&opCtx, _ns);
+ WriteUnitOfWork wuow(&opCtx);
_db = ctx.db();
- _coll = _db->createCollection(&txn, _ns);
+ _coll = _db->createCollection(&opCtx, _ns);
_catalog = _coll->getIndexCatalog();
wuow.commit();
}
~IndexIteratorTests() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
- OldClientContext ctx(&txn, _ns);
- WriteUnitOfWork wuow(&txn);
-
- _db->dropCollection(&txn, _ns);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ ScopedTransaction transaction(&opCtx, MODE_IX);
+ Lock::DBLock lk(opCtx.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
+ OldClientContext ctx(&opCtx, _ns);
+ WriteUnitOfWork wuow(&opCtx);
+
+ _db->dropCollection(&opCtx, _ns);
wuow.commit();
}
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- OldClientWriteContext ctx(&txn, _ns);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ OldClientWriteContext ctx(&opCtx, _ns);
- int numFinishedIndexesStart = _catalog->numIndexesReady(&txn);
+ int numFinishedIndexesStart = _catalog->numIndexesReady(&opCtx);
- dbtests::createIndex(&txn, _ns, BSON("x" << 1));
- dbtests::createIndex(&txn, _ns, BSON("y" << 1));
+ dbtests::createIndex(&opCtx, _ns, BSON("x" << 1));
+ dbtests::createIndex(&opCtx, _ns, BSON("y" << 1));
- ASSERT_TRUE(_catalog->numIndexesReady(&txn) == numFinishedIndexesStart + 2);
+ ASSERT_TRUE(_catalog->numIndexesReady(&opCtx) == numFinishedIndexesStart + 2);
- IndexCatalog::IndexIterator ii = _catalog->getIndexIterator(&txn, false);
+ IndexCatalog::IndexIterator ii = _catalog->getIndexIterator(&opCtx, false);
int indexesIterated = 0;
bool foundIndex = false;
while (ii.more()) {
@@ -91,7 +91,7 @@ public:
}
}
- ASSERT_TRUE(indexesIterated == _catalog->numIndexesReady(&txn));
+ ASSERT_TRUE(indexesIterated == _catalog->numIndexesReady(&opCtx));
ASSERT_TRUE(foundIndex);
}
@@ -107,64 +107,64 @@ private:
class RefreshEntry {
public:
RefreshEntry() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
- OldClientContext ctx(&txn, _ns);
- WriteUnitOfWork wuow(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ ScopedTransaction transaction(&opCtx, MODE_IX);
+ Lock::DBLock lk(opCtx.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
+ OldClientContext ctx(&opCtx, _ns);
+ WriteUnitOfWork wuow(&opCtx);
_db = ctx.db();
- _coll = _db->createCollection(&txn, _ns);
+ _coll = _db->createCollection(&opCtx, _ns);
_catalog = _coll->getIndexCatalog();
wuow.commit();
}
~RefreshEntry() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
- OldClientContext ctx(&txn, _ns);
- WriteUnitOfWork wuow(&txn);
-
- _db->dropCollection(&txn, _ns);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ ScopedTransaction transaction(&opCtx, MODE_IX);
+ Lock::DBLock lk(opCtx.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
+ OldClientContext ctx(&opCtx, _ns);
+ WriteUnitOfWork wuow(&opCtx);
+
+ _db->dropCollection(&opCtx, _ns);
wuow.commit();
}
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- OldClientWriteContext ctx(&txn, _ns);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ OldClientWriteContext ctx(&opCtx, _ns);
const std::string indexName = "x_1";
ASSERT_OK(dbtests::createIndexFromSpec(
- &txn,
+ &opCtx,
_ns,
BSON("name" << indexName << "ns" << _ns << "key" << BSON("x" << 1) << "v"
<< static_cast<int>(kIndexVersion)
<< "expireAfterSeconds"
<< 5)));
- const IndexDescriptor* desc = _catalog->findIndexByName(&txn, indexName);
+ const IndexDescriptor* desc = _catalog->findIndexByName(&opCtx, indexName);
ASSERT(desc);
ASSERT_EQUALS(5, desc->infoObj()["expireAfterSeconds"].numberLong());
// Change value of "expireAfterSeconds" on disk.
{
- WriteUnitOfWork wuow(&txn);
- _coll->getCatalogEntry()->updateTTLSetting(&txn, "x_1", 10);
+ WriteUnitOfWork wuow(&opCtx);
+ _coll->getCatalogEntry()->updateTTLSetting(&opCtx, "x_1", 10);
wuow.commit();
}
// Verify that the catalog does not yet know of the change.
- desc = _catalog->findIndexByName(&txn, indexName);
+ desc = _catalog->findIndexByName(&opCtx, indexName);
ASSERT_EQUALS(5, desc->infoObj()["expireAfterSeconds"].numberLong());
{
// Notify the catalog of the change.
- WriteUnitOfWork wuow(&txn);
- desc = _catalog->refreshEntry(&txn, desc);
+ WriteUnitOfWork wuow(&opCtx);
+ desc = _catalog->refreshEntry(&opCtx, desc);
wuow.commit();
}
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 4f7b0fa1b19..f0ab83fb9f0 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -61,7 +61,7 @@ static const char* const _ns = "unittests.indexupdate";
*/
class IndexBuildBase {
public:
- IndexBuildBase() : _ctx(&_txn, _ns), _client(&_txn) {
+ IndexBuildBase() : _ctx(&_opCtx, _ns), _client(&_opCtx) {
_client.createCollection(_ns);
}
~IndexBuildBase() {
@@ -77,13 +77,13 @@ protected:
bool buildIndexInterrupted(const BSONObj& key, bool allowInterruption) {
try {
- MultiIndexBlock indexer(&_txn, collection());
+ MultiIndexBlock indexer(&_opCtx, collection());
if (allowInterruption)
indexer.allowInterruption();
uassertStatusOK(indexer.init(key));
uassertStatusOK(indexer.insertAllDocumentsInCollection());
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
indexer.commit();
wunit.commit();
} catch (const DBException& e) {
@@ -96,7 +96,7 @@ protected:
}
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
OldClientWriteContext _ctx;
DBDirectClient _client;
};
@@ -110,17 +110,17 @@ public:
Database* db = _ctx.db();
Collection* coll;
{
- WriteUnitOfWork wunit(&_txn);
- db->dropCollection(&_txn, _ns);
- coll = db->createCollection(&_txn, _ns);
+ WriteUnitOfWork wunit(&_opCtx);
+ db->dropCollection(&_opCtx, _ns);
+ coll = db->createCollection(&_opCtx, _ns);
OpDebug* const nullOpDebug = nullptr;
- coll->insertDocument(&_txn,
+ coll->insertDocument(&_opCtx,
BSON("_id" << 1 << "a"
<< "dup"),
nullOpDebug,
true);
- coll->insertDocument(&_txn,
+ coll->insertDocument(&_opCtx,
BSON("_id" << 2 << "a"
<< "dup"),
nullOpDebug,
@@ -128,7 +128,7 @@ public:
wunit.commit();
}
- MultiIndexBlock indexer(&_txn, coll);
+ MultiIndexBlock indexer(&_opCtx, coll);
indexer.allowBackgroundBuilding();
indexer.allowInterruption();
indexer.ignoreUniqueConstraint();
@@ -149,7 +149,7 @@ public:
ASSERT_OK(indexer.init(spec).getStatus());
ASSERT_OK(indexer.insertAllDocumentsInCollection());
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
indexer.commit();
wunit.commit();
}
@@ -164,17 +164,17 @@ public:
Database* db = _ctx.db();
Collection* coll;
{
- WriteUnitOfWork wunit(&_txn);
- db->dropCollection(&_txn, _ns);
- coll = db->createCollection(&_txn, _ns);
+ WriteUnitOfWork wunit(&_opCtx);
+ db->dropCollection(&_opCtx, _ns);
+ coll = db->createCollection(&_opCtx, _ns);
OpDebug* const nullOpDebug = nullptr;
- coll->insertDocument(&_txn,
+ coll->insertDocument(&_opCtx,
BSON("_id" << 1 << "a"
<< "dup"),
nullOpDebug,
true);
- coll->insertDocument(&_txn,
+ coll->insertDocument(&_opCtx,
BSON("_id" << 2 << "a"
<< "dup"),
nullOpDebug,
@@ -182,7 +182,7 @@ public:
wunit.commit();
}
- MultiIndexBlock indexer(&_txn, coll);
+ MultiIndexBlock indexer(&_opCtx, coll);
indexer.allowBackgroundBuilding();
indexer.allowInterruption();
// indexer.ignoreUniqueConstraint(); // not calling this
@@ -217,17 +217,17 @@ public:
RecordId loc1;
RecordId loc2;
{
- WriteUnitOfWork wunit(&_txn);
- db->dropCollection(&_txn, _ns);
- coll = db->createCollection(&_txn, _ns);
+ WriteUnitOfWork wunit(&_opCtx);
+ db->dropCollection(&_opCtx, _ns);
+ coll = db->createCollection(&_opCtx, _ns);
OpDebug* const nullOpDebug = nullptr;
- ASSERT_OK(coll->insertDocument(&_txn,
+ ASSERT_OK(coll->insertDocument(&_opCtx,
BSON("_id" << 1 << "a"
<< "dup"),
nullOpDebug,
true));
- ASSERT_OK(coll->insertDocument(&_txn,
+ ASSERT_OK(coll->insertDocument(&_opCtx,
BSON("_id" << 2 << "a"
<< "dup"),
nullOpDebug,
@@ -235,7 +235,7 @@ public:
wunit.commit();
}
- MultiIndexBlock indexer(&_txn, coll);
+ MultiIndexBlock indexer(&_opCtx, coll);
indexer.allowBackgroundBuilding();
indexer.allowInterruption();
// indexer.ignoreUniqueConstraint(); // not calling this
@@ -262,7 +262,7 @@ public:
ASSERT_EQUALS(dups.size(), 1U);
for (auto recordId : dups) {
ASSERT_NOT_EQUALS(recordId, RecordId());
- BSONObj obj = coll->docFor(&_txn, recordId).value();
+ BSONObj obj = coll->docFor(&_opCtx, recordId).value();
int id = obj["_id"].Int();
ASSERT(id == 1 || id == 2);
}
@@ -277,16 +277,16 @@ public:
Database* db = _ctx.db();
Collection* coll;
{
- WriteUnitOfWork wunit(&_txn);
- db->dropCollection(&_txn, _ns);
- coll = db->createCollection(&_txn, _ns);
+ WriteUnitOfWork wunit(&_opCtx);
+ db->dropCollection(&_opCtx, _ns);
+ coll = db->createCollection(&_opCtx, _ns);
// Drop all indexes including id index.
- coll->getIndexCatalog()->dropAllIndexes(&_txn, true);
+ coll->getIndexCatalog()->dropAllIndexes(&_opCtx, true);
// Insert some documents with enforceQuota=true.
int32_t nDocs = 1000;
OpDebug* const nullOpDebug = nullptr;
for (int32_t i = 0; i < nDocs; ++i) {
- coll->insertDocument(&_txn, BSON("a" << i), nullOpDebug, true);
+ coll->insertDocument(&_opCtx, BSON("a" << i), nullOpDebug, true);
}
wunit.commit();
}
@@ -301,7 +301,7 @@ public:
// only want to interrupt the index build
getGlobalServiceContext()->unsetKillAllOperations();
// The new index is not listed in the index catalog because the index build failed.
- ASSERT(!coll->getIndexCatalog()->findIndexByName(&_txn, "a_1"));
+ ASSERT(!coll->getIndexCatalog()->findIndexByName(&_opCtx, "a_1"));
}
};
@@ -313,15 +313,15 @@ public:
Database* db = _ctx.db();
Collection* coll;
{
- WriteUnitOfWork wunit(&_txn);
- db->dropCollection(&_txn, _ns);
- coll = db->createCollection(&_txn, _ns);
- coll->getIndexCatalog()->dropAllIndexes(&_txn, true);
+ WriteUnitOfWork wunit(&_opCtx);
+ db->dropCollection(&_opCtx, _ns);
+ coll = db->createCollection(&_opCtx, _ns);
+ coll->getIndexCatalog()->dropAllIndexes(&_opCtx, true);
// Insert some documents.
int32_t nDocs = 1000;
OpDebug* const nullOpDebug = nullptr;
for (int32_t i = 0; i < nDocs; ++i) {
- coll->insertDocument(&_txn, BSON("a" << i), nullOpDebug, true);
+ coll->insertDocument(&_opCtx, BSON("a" << i), nullOpDebug, true);
}
wunit.commit();
}
@@ -336,7 +336,7 @@ public:
// only want to interrupt the index build
getGlobalServiceContext()->unsetKillAllOperations();
// The new index is listed in the index catalog because the index build completed.
- ASSERT(coll->getIndexCatalog()->findIndexByName(&_txn, "a_1"));
+ ASSERT(coll->getIndexCatalog()->findIndexByName(&_opCtx, "a_1"));
}
};
@@ -348,18 +348,18 @@ public:
Database* db = _ctx.db();
Collection* coll;
{
- WriteUnitOfWork wunit(&_txn);
- db->dropCollection(&_txn, _ns);
+ WriteUnitOfWork wunit(&_opCtx);
+ db->dropCollection(&_opCtx, _ns);
CollectionOptions options;
options.capped = true;
options.cappedSize = 10 * 1024;
- coll = db->createCollection(&_txn, _ns, options);
- coll->getIndexCatalog()->dropAllIndexes(&_txn, true);
+ coll = db->createCollection(&_opCtx, _ns, options);
+ coll->getIndexCatalog()->dropAllIndexes(&_opCtx, true);
// Insert some documents.
int32_t nDocs = 1000;
OpDebug* const nullOpDebug = nullptr;
for (int32_t i = 0; i < nDocs; ++i) {
- coll->insertDocument(&_txn, BSON("_id" << i), nullOpDebug, true);
+ coll->insertDocument(&_opCtx, BSON("_id" << i), nullOpDebug, true);
}
wunit.commit();
}
@@ -374,7 +374,7 @@ public:
// only want to interrupt the index build
getGlobalServiceContext()->unsetKillAllOperations();
// The new index is not listed in the index catalog because the index build failed.
- ASSERT(!coll->getIndexCatalog()->findIndexByName(&_txn, "_id_"));
+ ASSERT(!coll->getIndexCatalog()->findIndexByName(&_opCtx, "_id_"));
}
};
@@ -386,18 +386,18 @@ public:
Database* db = _ctx.db();
Collection* coll;
{
- WriteUnitOfWork wunit(&_txn);
- db->dropCollection(&_txn, _ns);
+ WriteUnitOfWork wunit(&_opCtx);
+ db->dropCollection(&_opCtx, _ns);
CollectionOptions options;
options.capped = true;
options.cappedSize = 10 * 1024;
- coll = db->createCollection(&_txn, _ns, options);
- coll->getIndexCatalog()->dropAllIndexes(&_txn, true);
+ coll = db->createCollection(&_opCtx, _ns, options);
+ coll->getIndexCatalog()->dropAllIndexes(&_opCtx, true);
// Insert some documents.
int32_t nDocs = 1000;
OpDebug* const nullOpDebug = nullptr;
for (int32_t i = 0; i < nDocs; ++i) {
- coll->insertDocument(&_txn, BSON("_id" << i), nullOpDebug, true);
+ coll->insertDocument(&_opCtx, BSON("_id" << i), nullOpDebug, true);
}
wunit.commit();
}
@@ -412,7 +412,7 @@ public:
// only want to interrupt the index build
getGlobalServiceContext()->unsetKillAllOperations();
// The new index is listed in the index catalog because the index build succeeded.
- ASSERT(coll->getIndexCatalog()->findIndexByName(&_txn, "_id_"));
+ ASSERT(coll->getIndexCatalog()->findIndexByName(&_opCtx, "_id_"));
}
};
@@ -430,7 +430,7 @@ public:
// Request an interrupt.
getGlobalServiceContext()->setKillAllOperations();
// The call is not interrupted.
- Helpers::ensureIndex(&_txn, collection(), BSON("a" << 1), kIndexVersion, false, "a_1");
+ Helpers::ensureIndex(&_opCtx, collection(), BSON("a" << 1), kIndexVersion, false, "a_1");
// only want to interrupt the index build
getGlobalServiceContext()->unsetKillAllOperations();
// The new index is listed in getIndexSpecs because the index build completed.
@@ -439,7 +439,7 @@ public:
};
Status IndexBuildBase::createIndex(const std::string& dbname, const BSONObj& indexSpec) {
- MultiIndexBlock indexer(&_txn, collection());
+ MultiIndexBlock indexer(&_opCtx, collection());
Status status = indexer.init(indexSpec).getStatus();
if (status == ErrorCodes::IndexAlreadyExists) {
return Status::OK();
@@ -451,7 +451,7 @@ Status IndexBuildBase::createIndex(const std::string& dbname, const BSONObj& ind
if (!status.isOK()) {
return status;
}
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
indexer.commit();
wunit.commit();
return Status::OK();
diff --git a/src/mongo/dbtests/jstests.cpp b/src/mongo/dbtests/jstests.cpp
index fe5c44fff0f..8569720e242 100644
--- a/src/mongo/dbtests/jstests.cpp
+++ b/src/mongo/dbtests/jstests.cpp
@@ -1002,9 +1002,9 @@ public:
string utf8ObjSpec = "{'_id':'\\u0001\\u007f\\u07ff\\uffff'}";
BSONObj utf8Obj = fromjson(utf8ObjSpec);
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient client(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient client(&opCtx);
client.insert(ns(), utf8Obj);
client.eval("unittest",
@@ -1023,9 +1023,9 @@ private:
}
void reset() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient client(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient client(&opCtx);
client.dropCollection(ns());
}
@@ -1047,9 +1047,9 @@ public:
if (!getGlobalScriptEngine()->utf8Ok())
return;
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient client(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient client(&opCtx);
client.eval("unittest",
"db.jstests.longutf8string.save( {_id:'\\uffff\\uffff\\uffff\\uffff'} )");
@@ -1057,9 +1057,9 @@ public:
private:
void reset() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient client(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient client(&opCtx);
client.dropCollection(ns());
}
@@ -1142,9 +1142,9 @@ public:
ServerGlobalParams::FeatureCompatibility::Version::k34);
// Drop the collection
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient client(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient client(&opCtx);
client.dropCollection("unittest.testroundtrip");
@@ -2251,9 +2251,9 @@ public:
update.appendCode("value",
"function () { db.test.find().forEach(function(obj) { continue; }); }");
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient client(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient client(&opCtx);
client.update("test.system.js", query.obj(), update.obj(), true /* upsert */);
unique_ptr<Scope> s(getGlobalScriptEngine()->newScope());
diff --git a/src/mongo/dbtests/matchertests.cpp b/src/mongo/dbtests/matchertests.cpp
index 73b5c728511..9a762f017a0 100644
--- a/src/mongo/dbtests/matchertests.cpp
+++ b/src/mongo/dbtests/matchertests.cpp
@@ -231,15 +231,15 @@ template <typename M>
class WhereSimple1 {
public:
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
const NamespaceString nss("unittests.matchertests");
- AutoGetCollectionForRead ctx(&txn, nss);
+ AutoGetCollectionForRead ctx(&opCtx, nss);
const CollatorInterface* collator = nullptr;
M m(BSON("$where"
<< "function(){ return this.a == 1; }"),
- ExtensionsCallbackReal(&txn, &nss),
+ ExtensionsCallbackReal(&opCtx, &nss),
collator);
ASSERT(m.matches(BSON("a" << 1)));
ASSERT(!m.matches(BSON("a" << 2)));
diff --git a/src/mongo/dbtests/mmaptests.cpp b/src/mongo/dbtests/mmaptests.cpp
index ab6766b72c3..b0afc3e916f 100644
--- a/src/mongo/dbtests/mmaptests.cpp
+++ b/src/mongo/dbtests/mmaptests.cpp
@@ -77,16 +77,16 @@ public:
MMAPV1LockerImpl lockState;
Lock::GlobalWrite lk(&lockState);
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
{
- DurableMappedFile f(txn.get());
- ON_BLOCK_EXIT([&f, &txn] {
- LockMongoFilesExclusive lock(txn.get());
- f.close(txn.get());
+ DurableMappedFile f(opCtx.get());
+ ON_BLOCK_EXIT([&f, &opCtx] {
+ LockMongoFilesExclusive lock(opCtx.get());
+ f.close(opCtx.get());
});
unsigned long long len = 256 * 1024 * 1024;
- verify(f.create(txn.get(), fn, len));
+ verify(f.create(opCtx.get(), fn, len));
{
char* p = (char*)f.getView();
verify(p);
@@ -99,12 +99,12 @@ public:
char* w = (char*)f.view_write();
strcpy(w + 6, "world");
}
- MongoFileFinder ff(txn.get());
+ MongoFileFinder ff(opCtx.get());
ASSERT(ff.findByPath(fn));
ASSERT(ff.findByPath("asdf") == 0);
}
{
- MongoFileFinder ff(txn.get());
+ MongoFileFinder ff(opCtx.get());
ASSERT(ff.findByPath(fn) == 0);
}
@@ -118,13 +118,14 @@ public:
Timer t;
for (int i = 0; i < N; i++) {
// Every 4 iterations we pass the sequential hint.
- DurableMappedFile f{
- txn.get(), i % 4 == 1 ? MongoFile::Options::SEQUENTIAL : MongoFile::Options::NONE};
- ON_BLOCK_EXIT([&f, &txn] {
- LockMongoFilesExclusive lock(txn.get());
- f.close(txn.get());
+ DurableMappedFile f{opCtx.get(),
+ i % 4 == 1 ? MongoFile::Options::SEQUENTIAL
+ : MongoFile::Options::NONE};
+ ON_BLOCK_EXIT([&f, &opCtx] {
+ LockMongoFilesExclusive lock(opCtx.get());
+ f.close(opCtx.get());
});
- verify(f.open(txn.get(), fn));
+ verify(f.open(opCtx.get(), fn));
{
char* p = (char*)f.getView();
verify(p);
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
index c8a5be64b42..81c6b3125b0 100644
--- a/src/mongo/dbtests/namespacetests.cpp
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -69,11 +69,11 @@ namespace MissingFieldTests {
class BtreeIndexMissingField {
public:
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
BSONObj spec(BSON("key" << BSON("a" << 1)));
ASSERT_EQUALS(jstNULL,
- IndexLegacy::getMissingField(&txn, NULL, spec).firstElement().type());
+ IndexLegacy::getMissingField(&opCtx, NULL, spec).firstElement().type());
}
};
@@ -81,12 +81,12 @@ public:
class TwoDIndexMissingField {
public:
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
BSONObj spec(BSON("key" << BSON("a"
<< "2d")));
ASSERT_EQUALS(jstNULL,
- IndexLegacy::getMissingField(&txn, NULL, spec).firstElement().type());
+ IndexLegacy::getMissingField(&opCtx, NULL, spec).firstElement().type());
}
};
@@ -94,8 +94,8 @@ public:
class HashedIndexMissingField {
public:
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
BSONObj spec(BSON("key" << BSON("a"
<< "hashed")));
BSONObj nullObj = BSON("a" << BSONNULL);
@@ -109,7 +109,7 @@ public:
ASSERT_EQUALS(ExpressionKeysPrivate::makeSingleHashKey(nullObj.firstElement(), 0, 0),
nullFieldFromKey.Long());
- BSONObj missingField = IndexLegacy::getMissingField(&txn, NULL, spec);
+ BSONObj missingField = IndexLegacy::getMissingField(&opCtx, NULL, spec);
ASSERT_EQUALS(NumberLong, missingField.firstElement().type());
ASSERT_BSONELT_EQ(nullFieldFromKey, missingField.firstElement());
}
@@ -122,8 +122,8 @@ public:
class HashedIndexMissingFieldAlternateSeed {
public:
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
BSONObj spec(BSON("key" << BSON("a"
<< "hashed")
<< "seed"
@@ -141,7 +141,7 @@ public:
// Ensure that getMissingField recognizes that the seed is different (and returns
// the right key).
- BSONObj missingField = IndexLegacy::getMissingField(&txn, NULL, spec);
+ BSONObj missingField = IndexLegacy::getMissingField(&opCtx, NULL, spec);
ASSERT_EQUALS(NumberLong, missingField.firstElement().type());
ASSERT_BSONELT_EQ(nullFieldFromKey, missingField.firstElement());
}
@@ -159,16 +159,16 @@ namespace NamespaceDetailsTests {
public:
Base( const char *ns = "unittests.NamespaceDetailsTests" ) : ns_( ns ) , _context( ns ) {}
virtual ~Base() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr;
if ( !nsd() )
return;
- _context.db()->dropCollection( &txn, ns() );
+ _context.db()->dropCollection( &opCtx, ns() );
}
protected:
void create() {
Lock::GlobalWrite lk;
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); OperationContext& txn = *txnPtr;
- ASSERT( userCreateNS( &txn, db(), ns(), fromjson( spec() ), false ).isOK() );
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr;
+ ASSERT( userCreateNS( &opCtx, db(), ns(), fromjson( spec() ), false ).isOK() );
}
virtual string spec() const = 0;
int nRecords() const {
@@ -250,10 +250,10 @@ namespace NamespaceDetailsTests {
class SingleAlloc : public Base {
public:
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr;
create();
BSONObj b = bigObj();
- ASSERT( collection()->insertDocument( &txn, b, true ).isOK() );
+ ASSERT( collection()->insertDocument( &opCtx, b, true ).isOK() );
ASSERT_EQUALS( 1, nRecords() );
}
virtual string spec() const { return "{\"capped\":true,\"size\":512,\"$nExtents\":1}"; }
@@ -262,7 +262,7 @@ namespace NamespaceDetailsTests {
class Realloc : public Base {
public:
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr;
create();
const int N = 20;
@@ -272,7 +272,7 @@ namespace NamespaceDetailsTests {
for ( int i = 0; i < N; ++i ) {
BSONObj b = bigObj();
StatusWith<RecordId> status =
- ASSERT( collection()->insertDocument( &txn, b, true ).isOK() );
+ ASSERT( collection()->insertDocument( &opCtx, b, true ).isOK() );
l[ i ] = status.getValue();
ASSERT( !l[ i ].isNull() );
ASSERT( nRecords() <= Q );
@@ -287,14 +287,14 @@ namespace NamespaceDetailsTests {
class TwoExtent : public Base {
public:
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr;
create();
ASSERT_EQUALS( 2, nExtents() );
RecordId l[ 8 ];
for ( int i = 0; i < 8; ++i ) {
StatusWith<RecordId> status =
- ASSERT( collection()->insertDocument( &txn, bigObj(), true ).isOK() );
+ ASSERT( collection()->insertDocument( &opCtx, bigObj(), true ).isOK() );
l[ i ] = status.getValue();
ASSERT( !l[ i ].isNull() );
//ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() );
@@ -308,7 +308,7 @@ namespace NamespaceDetailsTests {
bob.appendOID( "_id", NULL, true );
bob.append( "a", string( MinExtentSize + 500, 'a' ) ); // min extent size is now 4096
BSONObj bigger = bob.done();
- ASSERT( !collection()->insertDocument( &txn, bigger, false ).isOK() );
+ ASSERT( !collection()->insertDocument( &opCtx, bigger, false ).isOK() );
ASSERT_EQUALS( 0, nRecords() );
}
private:
@@ -335,13 +335,13 @@ namespace NamespaceDetailsTests {
class AllocCappedNotQuantized : public Base {
public:
void run() {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr;
create();
ASSERT( nsd()->isCapped() );
ASSERT( !nsd()->isUserFlagSet( NamespaceDetails::Flag_UsePowerOf2Sizes ) );
StatusWith<RecordId> result =
- collection()->insertDocument( &txn, docForRecordSize( 300 ), false );
+ collection()->insertDocument( &opCtx, docForRecordSize( 300 ), false );
ASSERT( result.isOK() );
Record* record = collection()->getRecordStore()->recordFor( result.getValue() );
// Check that no quantization is performed.
@@ -358,7 +358,7 @@ namespace NamespaceDetailsTests {
return "{\"capped\":true,\"size\":512,\"$nExtents\":2}";
}
void pass(int p) {
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr;
create();
ASSERT_EQUALS( 2, nExtents() );
@@ -371,7 +371,7 @@ namespace NamespaceDetailsTests {
//RecordId l[ 8 ];
for ( int i = 0; i < N; ++i ) {
BSONObj bb = bigObj();
- StatusWith<RecordId> status = collection()->insertDocument( &txn, bb, true );
+ StatusWith<RecordId> status = collection()->insertDocument( &opCtx, bb, true );
ASSERT( status.isOK() );
RecordId a = status.getValue();
if( T == i )
@@ -385,7 +385,7 @@ namespace NamespaceDetailsTests {
RecordId last, first;
{
- unique_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
+ unique_ptr<Runner> runner(InternalPlanner::collectionScan(&opCtx,
ns(),
collection(),
InternalPlanner::BACKWARD));
@@ -393,7 +393,7 @@ namespace NamespaceDetailsTests {
ASSERT( !last.isNull() );
}
{
- unique_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
+ unique_ptr<Runner> runner(InternalPlanner::collectionScan(&opCtx,
ns(),
collection(),
InternalPlanner::FORWARD));
@@ -402,12 +402,12 @@ namespace NamespaceDetailsTests {
ASSERT( first != last ) ;
}
- collection()->cappedTruncateAfter(&txn, truncAt, false);
+ collection()->cappedTruncateAfter(&opCtx, truncAt, false);
ASSERT_EQUALS( collection()->numRecords() , 28u );
{
RecordId loc;
- unique_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
+ unique_ptr<Runner> runner(InternalPlanner::collectionScan(&opCtx,
ns(),
collection(),
InternalPlanner::FORWARD));
@@ -415,7 +415,7 @@ namespace NamespaceDetailsTests {
ASSERT( first == loc);
}
{
- unique_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
+ unique_ptr<Runner> runner(InternalPlanner::collectionScan(&opCtx,
ns(),
collection(),
InternalPlanner::BACKWARD));
@@ -430,7 +430,7 @@ namespace NamespaceDetailsTests {
bob.appendOID("_id", 0, true);
bob.append( "a", string( MinExtentSize + 300, 'a' ) );
BSONObj bigger = bob.done();
- ASSERT( !collection()->insertDocument( &txn, bigger, true ).isOK() );
+ ASSERT( !collection()->insertDocument( &opCtx, bigger, true ).isOK() );
ASSERT_EQUALS( 0, nRecords() );
}
public:
@@ -503,28 +503,28 @@ namespace NamespaceDetailsTests {
create();
NamespaceDetails *nsd = collection()->detailsWritable();
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext(); OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr;
// Set 2 & 54 as multikey
- nsd->setIndexIsMultikey(&txn, 2, true);
- nsd->setIndexIsMultikey(&txn, 54, true);
+ nsd->setIndexIsMultikey(&opCtx, 2, true);
+ nsd->setIndexIsMultikey(&opCtx, 54, true);
ASSERT(nsd->isMultikey(2));
ASSERT(nsd->isMultikey(54));
// Flip 2 & 47
- nsd->setIndexIsMultikey(&txn, 2, false);
- nsd->setIndexIsMultikey(&txn, 47, true);
+ nsd->setIndexIsMultikey(&opCtx, 2, false);
+ nsd->setIndexIsMultikey(&opCtx, 47, true);
ASSERT(!nsd->isMultikey(2));
ASSERT(nsd->isMultikey(47));
// Reset entries that are already true
- nsd->setIndexIsMultikey(&txn, 54, true);
- nsd->setIndexIsMultikey(&txn, 47, true);
+ nsd->setIndexIsMultikey(&opCtx, 54, true);
+ nsd->setIndexIsMultikey(&opCtx, 47, true);
ASSERT(nsd->isMultikey(54));
ASSERT(nsd->isMultikey(47));
// Two non-multi-key
- nsd->setIndexIsMultikey(&txn, 2, false);
- nsd->setIndexIsMultikey(&txn, 43, false);
+ nsd->setIndexIsMultikey(&opCtx, 2, false);
+ nsd->setIndexIsMultikey(&opCtx, 43, false);
ASSERT(!nsd->isMultikey(2));
ASSERT(nsd->isMultikey(54));
ASSERT(nsd->isMultikey(47));
@@ -544,21 +544,21 @@ public:
const string committedName = dbName + ".committed";
const string rolledBackName = dbName + ".rolled_back";
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock lk(txn.lockState(), dbName, MODE_X);
+ ScopedTransaction transaction(&opCtx, MODE_IX);
+ Lock::DBLock lk(opCtx.lockState(), dbName, MODE_X);
bool justCreated;
- Database* db = dbHolder().openDb(&txn, dbName, &justCreated);
+ Database* db = dbHolder().openDb(&opCtx, dbName, &justCreated);
ASSERT(justCreated);
Collection* committedColl;
{
- WriteUnitOfWork wunit(&txn);
+ WriteUnitOfWork wunit(&opCtx);
ASSERT_FALSE(db->getCollection(committedName));
- committedColl = db->createCollection(&txn, committedName);
+ committedColl = db->createCollection(&opCtx, committedName);
ASSERT_EQUALS(db->getCollection(committedName), committedColl);
wunit.commit();
}
@@ -566,9 +566,9 @@ public:
ASSERT_EQUALS(db->getCollection(committedName), committedColl);
{
- WriteUnitOfWork wunit(&txn);
+ WriteUnitOfWork wunit(&opCtx);
ASSERT_FALSE(db->getCollection(rolledBackName));
- Collection* rolledBackColl = db->createCollection(&txn, rolledBackName);
+ Collection* rolledBackColl = db->createCollection(&opCtx, rolledBackName);
ASSERT_EQUALS(db->getCollection(rolledBackName), rolledBackColl);
// not committing so creation should be rolled back
}
@@ -589,23 +589,23 @@ public:
const string droppedName = dbName + ".dropped";
const string rolledBackName = dbName + ".rolled_back";
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock lk(txn.lockState(), dbName, MODE_X);
+ ScopedTransaction transaction(&opCtx, MODE_IX);
+ Lock::DBLock lk(opCtx.lockState(), dbName, MODE_X);
bool justCreated;
- Database* db = dbHolder().openDb(&txn, dbName, &justCreated);
+ Database* db = dbHolder().openDb(&opCtx, dbName, &justCreated);
ASSERT(justCreated);
{
- WriteUnitOfWork wunit(&txn);
+ WriteUnitOfWork wunit(&opCtx);
ASSERT_FALSE(db->getCollection(droppedName));
Collection* droppedColl;
- droppedColl = db->createCollection(&txn, droppedName);
+ droppedColl = db->createCollection(&opCtx, droppedName);
ASSERT_EQUALS(db->getCollection(droppedName), droppedColl);
- db->dropCollection(&txn, droppedName);
+ db->dropCollection(&opCtx, droppedName);
wunit.commit();
}
@@ -613,12 +613,12 @@ public:
ASSERT_FALSE(db->getCollection(droppedName));
{
- WriteUnitOfWork wunit(&txn);
+ WriteUnitOfWork wunit(&opCtx);
ASSERT_FALSE(db->getCollection(rolledBackName));
- Collection* rolledBackColl = db->createCollection(&txn, rolledBackName);
+ Collection* rolledBackColl = db->createCollection(&opCtx, rolledBackName);
wunit.commit();
ASSERT_EQUALS(db->getCollection(rolledBackName), rolledBackColl);
- db->dropCollection(&txn, rolledBackName);
+ db->dropCollection(&opCtx, rolledBackName);
// not committing so dropping should be rolled back
}
diff --git a/src/mongo/dbtests/oplogstarttests.cpp b/src/mongo/dbtests/oplogstarttests.cpp
index 5dedeac260a..4070d0b5440 100644
--- a/src/mongo/dbtests/oplogstarttests.cpp
+++ b/src/mongo/dbtests/oplogstarttests.cpp
@@ -45,17 +45,17 @@ static const NamespaceString nss("unittests.oplogstarttests");
class Base {
public:
Base()
- : _scopedXact(&_txn, MODE_X),
- _lk(_txn.lockState()),
- _context(&_txn, nss.ns()),
- _client(&_txn) {
+ : _scopedXact(&_opCtx, MODE_X),
+ _lk(_opCtx.lockState()),
+ _context(&_opCtx, nss.ns()),
+ _client(&_opCtx) {
Collection* c = _context.db()->getCollection(nss.ns());
if (!c) {
- WriteUnitOfWork wuow(&_txn);
- c = _context.db()->createCollection(&_txn, nss.ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ c = _context.db()->createCollection(&_opCtx, nss.ns());
wuow.commit();
}
- ASSERT(c->getIndexCatalog()->haveIdIndex(&_txn));
+ ASSERT(c->getIndexCatalog()->haveIdIndex(&_opCtx));
}
~Base() {
@@ -78,11 +78,11 @@ protected:
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(query);
auto statusWithCQ = CanonicalQuery::canonicalize(
- &_txn, std::move(qr), ExtensionsCallbackDisallowExtensions());
+ &_opCtx, std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
_cq = std::move(statusWithCQ.getValue());
_oplogws.reset(new WorkingSet());
- _stage.reset(new OplogStart(&_txn, collection(), _cq->root(), _oplogws.get()));
+ _stage.reset(new OplogStart(&_opCtx, collection(), _cq->root(), _oplogws.get()));
}
void assertWorkingSetMemberHasId(WorkingSetID id, int expectedId) {
@@ -100,7 +100,7 @@ protected:
private:
// The order of these is important in order to ensure order of destruction
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
ScopedTransaction _scopedXact;
Lock::GlobalWrite _lk;
OldClientContext _context;
diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp
index 711e8d6f9b8..59eb024b5c5 100644
--- a/src/mongo/dbtests/pdfiletests.cpp
+++ b/src/mongo/dbtests/pdfiletests.cpp
@@ -44,13 +44,13 @@ namespace PdfileTests {
namespace Insert {
class Base {
public:
- Base() : _scopedXact(&_txn, MODE_X), _lk(_txn.lockState()), _context(&_txn, ns()) {}
+ Base() : _scopedXact(&_opCtx, MODE_X), _lk(_opCtx.lockState()), _context(&_opCtx, ns()) {}
virtual ~Base() {
if (!collection())
return;
- WriteUnitOfWork wunit(&_txn);
- _context.db()->dropCollection(&_txn, ns());
+ WriteUnitOfWork wunit(&_opCtx);
+ _context.db()->dropCollection(&_opCtx, ns());
wunit.commit();
}
@@ -63,7 +63,7 @@ protected:
}
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
ScopedTransaction _scopedXact;
Lock::GlobalWrite _lk;
OldClientContext _context;
@@ -72,18 +72,18 @@ protected:
class InsertNoId : public Base {
public:
void run() {
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
BSONObj x = BSON("x" << 1);
ASSERT(x["_id"].type() == 0);
- Collection* collection = _context.db()->getOrCreateCollection(&_txn, ns());
+ Collection* collection = _context.db()->getOrCreateCollection(&_opCtx, ns());
OpDebug* const nullOpDebug = nullptr;
- ASSERT(!collection->insertDocument(&_txn, x, nullOpDebug, true).isOK());
+ ASSERT(!collection->insertDocument(&_opCtx, x, nullOpDebug, true).isOK());
- StatusWith<BSONObj> fixed = fixDocumentForInsert(_txn.getServiceContext(), x);
+ StatusWith<BSONObj> fixed = fixDocumentForInsert(_opCtx.getServiceContext(), x);
ASSERT(fixed.isOK());
x = fixed.getValue();
ASSERT(x["_id"].type() == jstOID);
- ASSERT(collection->insertDocument(&_txn, x, nullOpDebug, true).isOK());
+ ASSERT(collection->insertDocument(&_opCtx, x, nullOpDebug, true).isOK());
wunit.commit();
}
};
@@ -96,7 +96,7 @@ public:
b.append("_id", 1);
BSONObj o = b.done();
- BSONObj fixed = fixDocumentForInsert(_txn.getServiceContext(), o).getValue();
+ BSONObj fixed = fixDocumentForInsert(_opCtx.getServiceContext(), o).getValue();
ASSERT_EQUALS(2, fixed.nFields());
ASSERT(fixed.firstElement().fieldNameStringData() == "_id");
ASSERT(fixed.firstElement().number() == 1);
@@ -121,7 +121,7 @@ public:
o = b.obj();
}
- BSONObj fixed = fixDocumentForInsert(_txn.getServiceContext(), o).getValue();
+ BSONObj fixed = fixDocumentForInsert(_opCtx.getServiceContext(), o).getValue();
ASSERT_EQUALS(3, fixed.nFields());
ASSERT(fixed.firstElement().fieldNameStringData() == "_id");
ASSERT(fixed.firstElement().number() == 1);
@@ -143,12 +143,12 @@ public:
class ValidId : public Base {
public:
void run() {
- ASSERT(fixDocumentForInsert(_txn.getServiceContext(), BSON("_id" << 5)).isOK());
+ ASSERT(fixDocumentForInsert(_opCtx.getServiceContext(), BSON("_id" << 5)).isOK());
ASSERT(
- fixDocumentForInsert(_txn.getServiceContext(), BSON("_id" << BSON("x" << 5))).isOK());
- ASSERT(
- !fixDocumentForInsert(_txn.getServiceContext(), BSON("_id" << BSON("$x" << 5))).isOK());
- ASSERT(!fixDocumentForInsert(_txn.getServiceContext(), BSON("_id" << BSON("$oid" << 5)))
+ fixDocumentForInsert(_opCtx.getServiceContext(), BSON("_id" << BSON("x" << 5))).isOK());
+ ASSERT(!fixDocumentForInsert(_opCtx.getServiceContext(), BSON("_id" << BSON("$x" << 5)))
+ .isOK());
+ ASSERT(!fixDocumentForInsert(_opCtx.getServiceContext(), BSON("_id" << BSON("$oid" << 5)))
.isOK());
}
};
diff --git a/src/mongo/dbtests/perftests.cpp b/src/mongo/dbtests/perftests.cpp
index 8c761037950..042b3629b5d 100644
--- a/src/mongo/dbtests/perftests.cpp
+++ b/src/mongo/dbtests/perftests.cpp
@@ -82,11 +82,11 @@ const bool profiling = false;
class ClientBase {
public:
- ClientBase() : _client(&_txn) {
- mongo::LastError::get(_txn.getClient()).reset();
+ ClientBase() : _client(&_opCtx) {
+ mongo::LastError::get(_opCtx.getClient()).reset();
}
virtual ~ClientBase() {
- mongo::LastError::get(_txn.getClient()).reset();
+ mongo::LastError::get(_opCtx.getClient()).reset();
}
protected:
@@ -103,13 +103,13 @@ protected:
DBClientBase* client() {
return &_client;
}
- OperationContext* txn() {
- return &_txn;
+ OperationContext* opCtx() {
+ return &_opCtx;
}
private:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
DBDirectClient _client;
};
@@ -332,9 +332,9 @@ public:
srand(++z ^ (unsigned)time(0));
#endif
Client::initThreadIfNotAlready("perftestthr");
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- DBDirectClient c(&txn);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ DBDirectClient c(&opCtx);
const unsigned int Batch = batchSize();
prepThreaded();
diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp
index 031a95d807a..44e2ddfba31 100644
--- a/src/mongo/dbtests/plan_ranking.cpp
+++ b/src/mongo/dbtests/plan_ranking.cpp
@@ -74,14 +74,14 @@ public:
PlanRankingTestBase()
: _internalQueryForceIntersectionPlans(internalQueryForceIntersectionPlans.load()),
_enableHashIntersection(internalQueryPlannerEnableHashIntersection.load()),
- _client(&_txn) {
+ _client(&_opCtx) {
// Run all tests with hash-based intersection enabled.
internalQueryPlannerEnableHashIntersection.store(true);
// Ensure N is significantly larger then internalQueryPlanEvaluationWorks.
ASSERT_GTE(N, internalQueryPlanEvaluationWorks.load() + 1000);
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
_client.dropCollection(nss.ns());
}
@@ -92,12 +92,12 @@ public:
}
void insert(const BSONObj& obj) {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
_client.insert(nss.ns(), obj);
}
void addIndex(const BSONObj& obj) {
- ASSERT_OK(dbtests::createIndex(&_txn, nss.ns(), obj));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns(), obj));
}
/**
@@ -107,11 +107,11 @@ public:
* Does NOT take ownership of 'cq'. Caller DOES NOT own the returned QuerySolution*.
*/
QuerySolution* pickBestPlan(CanonicalQuery* cq) {
- AutoGetCollectionForRead ctx(&_txn, nss);
+ AutoGetCollectionForRead ctx(&_opCtx, nss);
Collection* collection = ctx.getCollection();
QueryPlannerParams plannerParams;
- fillOutPlannerParams(&_txn, collection, cq, &plannerParams);
+ fillOutPlannerParams(&_opCtx, collection, cq, &plannerParams);
// Turn this off otherwise it pops up in some plans.
plannerParams.options &= ~QueryPlannerParams::KEEP_MUTATIONS;
@@ -123,18 +123,18 @@ public:
ASSERT_GREATER_THAN_OR_EQUALS(solutions.size(), 1U);
// Fill out the MPR.
- _mps.reset(new MultiPlanStage(&_txn, collection, cq));
+ _mps.reset(new MultiPlanStage(&_opCtx, collection, cq));
unique_ptr<WorkingSet> ws(new WorkingSet());
// Put each solution from the planner into the MPR.
for (size_t i = 0; i < solutions.size(); ++i) {
PlanStage* root;
- ASSERT(StageBuilder::build(&_txn, collection, *cq, *solutions[i], ws.get(), &root));
+ ASSERT(StageBuilder::build(&_opCtx, collection, *cq, *solutions[i], ws.get(), &root));
// Takes ownership of all (actually some) arguments.
_mps->addPlan(solutions[i], root, ws.get());
}
// This is what sets a backup plan, should we test for it.
PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL,
- _txn.getServiceContext()->getFastClockSource());
+ _opCtx.getServiceContext()->getFastClockSource());
_mps->pickBestPlan(&yieldPolicy);
ASSERT(_mps->bestPlanChosen());
@@ -153,8 +153,8 @@ public:
return _mps->hasBackupPlan();
}
- OperationContext* txn() {
- return &_txn;
+ OperationContext* opCtx() {
+ return &_opCtx;
}
protected:
@@ -164,7 +164,7 @@ protected:
const int N = 12000;
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
private:
// Holds the value of global "internalQueryForceIntersectionPlans" setParameter flag.
@@ -202,7 +202,7 @@ public:
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(BSON("a" << 100 << "b" << 1));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
verify(statusWithCQ.isOK());
cq = std::move(statusWithCQ.getValue());
ASSERT(cq.get());
@@ -222,7 +222,7 @@ public:
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(BSON("a" << 100 << "b" << 1));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
verify(statusWithCQ.isOK());
cq = std::move(statusWithCQ.getValue());
}
@@ -258,7 +258,7 @@ public:
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(BSON("a" << 1 << "b" << BSON("$gt" << 1)));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
verify(statusWithCQ.isOK());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(NULL != cq.get());
@@ -300,7 +300,7 @@ public:
qr->setFilter(BSON("a" << 27));
qr->setProj(BSON("_id" << 0 << "a" << 1 << "b" << 1));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(NULL != cq.get());
@@ -335,7 +335,7 @@ public:
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(BSON("a" << 1 << "b" << 1 << "c" << 99));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(NULL != cq.get());
@@ -375,7 +375,7 @@ public:
qr->setProj(BSON("_id" << 0 << "a" << 1 << "b" << 1));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(NULL != cq.get());
@@ -409,7 +409,7 @@ public:
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(BSON("a" << N + 1 << "b" << 1));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
verify(statusWithCQ.isOK());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(NULL != cq.get());
@@ -446,7 +446,7 @@ public:
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(BSON("a" << BSON("$gte" << N + 1) << "b" << 1));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
verify(statusWithCQ.isOK());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(NULL != cq.get());
@@ -477,7 +477,7 @@ public:
qr->setFilter(BSON("_id" << BSON("$gte" << 20 << "$lte" << 200)));
qr->setSort(BSON("c" << 1));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -507,7 +507,7 @@ public:
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(BSON("foo" << 2001));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
verify(statusWithCQ.isOK());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(NULL != cq.get());
@@ -542,7 +542,7 @@ public:
qr->setFilter(BSON("a" << 1));
qr->setSort(BSON("d" << 1));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(NULL != cq.get());
@@ -580,7 +580,7 @@ public:
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson("{a: 1, b: 1, c: {$gte: 5000}}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(NULL != cq.get());
@@ -613,7 +613,7 @@ public:
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson("{a: 9, b: {$ne: 10}, c: 9}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(NULL != cq.get());
diff --git a/src/mongo/dbtests/query_plan_executor.cpp b/src/mongo/dbtests/query_plan_executor.cpp
index 71492c66683..7265c0e8dbc 100644
--- a/src/mongo/dbtests/query_plan_executor.cpp
+++ b/src/mongo/dbtests/query_plan_executor.cpp
@@ -64,14 +64,14 @@ static const NamespaceString nss("unittests.QueryPlanExecutor");
class PlanExecutorBase {
public:
- PlanExecutorBase() : _client(&_txn) {}
+ PlanExecutorBase() : _client(&_opCtx) {}
virtual ~PlanExecutorBase() {
_client.dropCollection(nss.ns());
}
void addIndex(const BSONObj& obj) {
- ASSERT_OK(dbtests::createIndex(&_txn, nss.ns(), obj));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns(), obj));
}
void insert(const BSONObj& obj) {
@@ -107,17 +107,22 @@ public:
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(filterObj);
auto statusWithCQ = CanonicalQuery::canonicalize(
- &_txn, std::move(qr), ExtensionsCallbackDisallowExtensions());
+ &_opCtx, std::move(qr), ExtensionsCallbackDisallowExtensions());
verify(statusWithCQ.isOK());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
verify(NULL != cq.get());
// Make the stage.
- unique_ptr<PlanStage> root(new CollectionScan(&_txn, csparams, ws.get(), cq.get()->root()));
+ unique_ptr<PlanStage> root(
+ new CollectionScan(&_opCtx, csparams, ws.get(), cq.get()->root()));
// Hand the plan off to the executor.
- auto statusWithPlanExecutor = PlanExecutor::make(
- &_txn, std::move(ws), std::move(root), std::move(cq), coll, PlanExecutor::YIELD_MANUAL);
+ auto statusWithPlanExecutor = PlanExecutor::make(&_opCtx,
+ std::move(ws),
+ std::move(root),
+ std::move(cq),
+ coll,
+ PlanExecutor::YIELD_MANUAL);
ASSERT_OK(statusWithPlanExecutor.getStatus());
return statusWithPlanExecutor.getValue().release();
}
@@ -148,25 +153,29 @@ public:
const Collection* coll = db->getCollection(nss.ns());
unique_ptr<WorkingSet> ws(new WorkingSet());
- IndexScan* ix = new IndexScan(&_txn, ixparams, ws.get(), NULL);
- unique_ptr<PlanStage> root(new FetchStage(&_txn, ws.get(), ix, NULL, coll));
+ IndexScan* ix = new IndexScan(&_opCtx, ixparams, ws.get(), NULL);
+ unique_ptr<PlanStage> root(new FetchStage(&_opCtx, ws.get(), ix, NULL, coll));
auto qr = stdx::make_unique<QueryRequest>(nss);
auto statusWithCQ = CanonicalQuery::canonicalize(
- &_txn, std::move(qr), ExtensionsCallbackDisallowExtensions());
+ &_opCtx, std::move(qr), ExtensionsCallbackDisallowExtensions());
verify(statusWithCQ.isOK());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
verify(NULL != cq.get());
// Hand the plan off to the executor.
- auto statusWithPlanExecutor = PlanExecutor::make(
- &_txn, std::move(ws), std::move(root), std::move(cq), coll, PlanExecutor::YIELD_MANUAL);
+ auto statusWithPlanExecutor = PlanExecutor::make(&_opCtx,
+ std::move(ws),
+ std::move(root),
+ std::move(cq),
+ coll,
+ PlanExecutor::YIELD_MANUAL);
ASSERT_OK(statusWithPlanExecutor.getStatus());
return statusWithPlanExecutor.getValue().release();
}
size_t numCursors() {
- AutoGetCollectionForRead ctx(&_txn, nss);
+ AutoGetCollectionForRead ctx(&_opCtx, nss);
Collection* collection = ctx.getCollection();
if (!collection)
return 0;
@@ -175,31 +184,31 @@ public:
void registerExec(PlanExecutor* exec) {
// TODO: This is not correct (create collection under S-lock)
- AutoGetCollectionForRead ctx(&_txn, nss);
- WriteUnitOfWork wunit(&_txn);
- Collection* collection = ctx.getDb()->getOrCreateCollection(&_txn, nss.ns());
+ AutoGetCollectionForRead ctx(&_opCtx, nss);
+ WriteUnitOfWork wunit(&_opCtx);
+ Collection* collection = ctx.getDb()->getOrCreateCollection(&_opCtx, nss.ns());
collection->getCursorManager()->registerExecutor(exec);
wunit.commit();
}
void deregisterExec(PlanExecutor* exec) {
// TODO: This is not correct (create collection under S-lock)
- AutoGetCollectionForRead ctx(&_txn, nss);
- WriteUnitOfWork wunit(&_txn);
- Collection* collection = ctx.getDb()->getOrCreateCollection(&_txn, nss.ns());
+ AutoGetCollectionForRead ctx(&_opCtx, nss);
+ WriteUnitOfWork wunit(&_opCtx);
+ Collection* collection = ctx.getDb()->getOrCreateCollection(&_opCtx, nss.ns());
collection->getCursorManager()->deregisterExecutor(exec);
wunit.commit();
}
protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
private:
IndexDescriptor* getIndex(Database* db, const BSONObj& obj) {
Collection* collection = db->getCollection(nss.ns());
std::vector<IndexDescriptor*> indexes;
- collection->getIndexCatalog()->findIndexesByKeyPattern(&_txn, obj, false, &indexes);
+ collection->getIndexCatalog()->findIndexesByKeyPattern(&_opCtx, obj, false, &indexes);
ASSERT_LTE(indexes.size(), 1U);
return indexes.size() == 0 ? nullptr : indexes[0];
}
@@ -214,7 +223,7 @@ private:
class DropCollScan : public PlanExecutorBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
insert(BSON("_id" << 1));
insert(BSON("_id" << 2));
@@ -242,7 +251,7 @@ public:
class DropIndexScan : public PlanExecutorBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
insert(BSON("_id" << 1 << "a" << 6));
insert(BSON("_id" << 2 << "a" << 7));
insert(BSON("_id" << 3 << "a" << 8));
@@ -270,7 +279,7 @@ public:
class DropIndexScanAgg : public PlanExecutorBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
insert(BSON("_id" << 1 << "a" << 6));
insert(BSON("_id" << 2 << "a" << 7));
@@ -283,7 +292,7 @@ public:
// Create the aggregation pipeline.
std::vector<BSONObj> rawPipeline = {fromjson("{$match: {a: {$gte: 7, $lte: 10}}}")};
boost::intrusive_ptr<ExpressionContextForTest> expCtx =
- new ExpressionContextForTest(&_txn, AggregationRequest(nss, rawPipeline));
+ new ExpressionContextForTest(&_opCtx, AggregationRequest(nss, rawPipeline));
// Create an "inner" plan executor and register it with the cursor manager so that it can
// get notified when the collection is dropped.
@@ -299,10 +308,10 @@ public:
// Create the output PlanExecutor that pulls results from the pipeline.
auto ws = make_unique<WorkingSet>();
- auto proxy = make_unique<PipelineProxyStage>(&_txn, pipeline, ws.get());
+ auto proxy = make_unique<PipelineProxyStage>(&_opCtx, pipeline, ws.get());
auto statusWithPlanExecutor = PlanExecutor::make(
- &_txn, std::move(ws), std::move(proxy), collection, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(proxy), collection, PlanExecutor::YIELD_MANUAL);
ASSERT_OK(statusWithPlanExecutor.getStatus());
unique_ptr<PlanExecutor> outerExec = std::move(statusWithPlanExecutor.getValue());
@@ -377,7 +386,7 @@ protected:
class SnapshotControl : public SnapshotBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
setupCollection();
BSONObj filterObj = fromjson("{a: {$gte: 2}}");
@@ -404,7 +413,7 @@ public:
class SnapshotTest : public SnapshotBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
setupCollection();
BSONObj indexSpec = BSON("_id" << 1);
addIndex(indexSpec);
@@ -435,7 +444,7 @@ using mongo::ClientCursor;
class Invalidate : public PlanExecutorBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
insert(BSON("a" << 1 << "b" << 1));
BSONObj filterObj = fromjson("{_id: {$gt: 0}, b: {$gt: 0}}");
@@ -461,7 +470,7 @@ public:
class InvalidatePinned : public PlanExecutorBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
insert(BSON("a" << 1 << "b" << 1));
Collection* collection = ctx.getCollection();
@@ -501,12 +510,12 @@ class Timeout : public PlanExecutorBase {
public:
void run() {
{
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
insert(BSON("a" << 1 << "b" << 1));
}
{
- AutoGetCollectionForRead ctx(&_txn, nss);
+ AutoGetCollectionForRead ctx(&_opCtx, nss);
Collection* collection = ctx.getCollection();
BSONObj filterObj = fromjson("{_id: {$gt: 0}, b: {$gt: 0}}");
@@ -519,7 +528,7 @@ public:
// There should be one cursor before timeout,
// and zero cursors after timeout.
ASSERT_EQUALS(1U, numCursors());
- CursorManager::timeoutCursorsGlobal(&_txn, 600001);
+ CursorManager::timeoutCursorsGlobal(&_opCtx, 600001);
ASSERT_EQUALS(0U, numCursors());
}
};
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index 340101f66fa..fa4961ce4eb 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -61,19 +61,19 @@ using stdx::make_unique;
class QueryStageAndBase {
public:
- QueryStageAndBase() : _client(&_txn) {}
+ QueryStageAndBase() : _client(&_opCtx) {}
virtual ~QueryStageAndBase() {
_client.dropCollection(ns());
}
void addIndex(const BSONObj& obj) {
- ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), obj));
}
IndexDescriptor* getIndex(const BSONObj& obj, Collection* coll) {
std::vector<IndexDescriptor*> indexes;
- coll->getIndexCatalog()->findIndexesByKeyPattern(&_txn, obj, false, &indexes);
+ coll->getIndexCatalog()->findIndexesByKeyPattern(&_opCtx, obj, false, &indexes);
if (indexes.empty()) {
FAIL(mongoutils::str::stream() << "Unable to find index with key pattern " << obj);
}
@@ -81,7 +81,7 @@ public:
}
void getRecordIds(set<RecordId>* out, Collection* coll) {
- auto cursor = coll->getCursor(&_txn);
+ auto cursor = coll->getCursor(&_opCtx);
while (auto record = cursor->next()) {
out->insert(record->id);
}
@@ -151,7 +151,7 @@ public:
protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
private:
DBDirectClient _client;
@@ -168,12 +168,12 @@ private:
class QueryStageAndHashInvalidation : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -185,7 +185,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll);
// Foo <= 20
IndexScanParams params;
@@ -195,7 +195,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Bar >= 10
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -203,7 +203,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// ah reads the first child into its hash table.
// ah should read foo=20, foo=19, ..., foo=0 in that order.
@@ -221,9 +221,9 @@ public:
getRecordIds(&data, coll);
size_t memUsageBefore = ah->getMemUsage();
for (set<RecordId>::const_iterator it = data.begin(); it != data.end(); ++it) {
- if (coll->docFor(&_txn, *it).value()["foo"].numberInt() == 15) {
- ah->invalidate(&_txn, *it, INVALIDATION_DELETION);
- remove(coll->docFor(&_txn, *it).value());
+ if (coll->docFor(&_opCtx, *it).value()["foo"].numberInt() == 15) {
+ ah->invalidate(&_opCtx, *it, INVALIDATION_DELETION);
+ remove(coll->docFor(&_opCtx, *it).value());
break;
}
}
@@ -273,12 +273,12 @@ public:
class QueryStageAndHashInvalidateLookahead : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -291,7 +291,7 @@ public:
addIndex(BSON("baz" << 1));
WorkingSet ws;
- auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll);
// Foo <= 20 (descending)
IndexScanParams params;
@@ -301,12 +301,12 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Bar <= 19 (descending)
params.descriptor = getIndex(BSON("bar" << 1), coll);
params.bounds.startKey = BSON("" << 19);
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// First call to work reads the first result from the children.
// The first result is for the first scan over foo is {foo: 20, bar: 20, baz: 20}.
@@ -326,8 +326,8 @@ public:
size_t memUsageBefore = ah->getMemUsage();
for (set<RecordId>::const_iterator it = data.begin(); it != data.end(); ++it) {
- if (0 == deletedObj.woCompare(coll->docFor(&_txn, *it).value())) {
- ah->invalidate(&_txn, *it, INVALIDATION_DELETION);
+ if (0 == deletedObj.woCompare(coll->docFor(&_opCtx, *it).value())) {
+ ah->invalidate(&_opCtx, *it, INVALIDATION_DELETION);
break;
}
}
@@ -350,7 +350,8 @@ public:
continue;
}
WorkingSetMember* wsm = ws.get(id);
- ASSERT_NOT_EQUALS(0, deletedObj.woCompare(coll->docFor(&_txn, wsm->recordId).value()));
+ ASSERT_NOT_EQUALS(0,
+ deletedObj.woCompare(coll->docFor(&_opCtx, wsm->recordId).value()));
++count;
}
@@ -362,12 +363,12 @@ public:
class QueryStageAndHashTwoLeaf : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -379,7 +380,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll);
// Foo <= 20
IndexScanParams params;
@@ -389,7 +390,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Bar >= 10
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -397,7 +398,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// foo == bar == baz, and foo<=20, bar>=10, so our values are:
// foo == 10, 11, 12, 13, 14, 15. 16, 17, 18, 19, 20
@@ -412,12 +413,12 @@ public:
class QueryStageAndHashTwoLeafFirstChildLargeKeys : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -434,7 +435,7 @@ public:
// before hashed AND is done reading the first child (stage has to
// hold 21 keys in buffer for Foo <= 20).
WorkingSet ws;
- auto ah = make_unique<AndHashStage>(&_txn, &ws, coll, 20 * big.size());
+ auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll, 20 * big.size());
// Foo <= 20
IndexScanParams params;
@@ -444,7 +445,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Bar >= 10
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -452,7 +453,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Stage execution should fail.
ASSERT_EQUALS(-1, countResults(ah.get()));
@@ -465,12 +466,12 @@ public:
class QueryStageAndHashTwoLeafLastChildLargeKeys : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -487,7 +488,7 @@ public:
// keys in last child's index are not buffered. There are 6 keys
// that satisfy the criteria Foo <= 20 and Bar >= 10 and 5 <= baz <= 15.
WorkingSet ws;
- auto ah = make_unique<AndHashStage>(&_txn, &ws, coll, 5 * big.size());
+ auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll, 5 * big.size());
// Foo <= 20
IndexScanParams params;
@@ -497,7 +498,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Bar >= 10
params.descriptor = getIndex(BSON("bar" << 1 << "big" << 1), coll);
@@ -505,7 +506,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// foo == bar == baz, and foo<=20, bar>=10, so our values are:
// foo == 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20.
@@ -517,12 +518,12 @@ public:
class QueryStageAndHashThreeLeaf : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -535,7 +536,7 @@ public:
addIndex(BSON("baz" << 1));
WorkingSet ws;
- auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll);
// Foo <= 20
IndexScanParams params;
@@ -545,7 +546,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Bar >= 10
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -553,7 +554,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// 5 <= baz <= 15
params.descriptor = getIndex(BSON("baz" << 1), coll);
@@ -561,7 +562,7 @@ public:
params.bounds.endKey = BSON("" << 15);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// foo == bar == baz, and foo<=20, bar>=10, 5<=baz<=15, so our values are:
// foo == 10, 11, 12, 13, 14, 15.
@@ -579,12 +580,12 @@ public:
class QueryStageAndHashThreeLeafMiddleChildLargeKeys : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -602,7 +603,7 @@ public:
// before hashed AND is done reading the second child (stage has to
// hold 11 keys in buffer for Foo <= 20 and Bar >= 10).
WorkingSet ws;
- auto ah = make_unique<AndHashStage>(&_txn, &ws, coll, 10 * big.size());
+ auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll, 10 * big.size());
// Foo <= 20
IndexScanParams params;
@@ -612,7 +613,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Bar >= 10
params.descriptor = getIndex(BSON("bar" << 1 << "big" << 1), coll);
@@ -620,7 +621,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// 5 <= baz <= 15
params.descriptor = getIndex(BSON("baz" << 1), coll);
@@ -628,7 +629,7 @@ public:
params.bounds.endKey = BSON("" << 15);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Stage execution should fail.
ASSERT_EQUALS(-1, countResults(ah.get()));
@@ -639,12 +640,12 @@ public:
class QueryStageAndHashWithNothing : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -656,7 +657,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll);
// Foo <= 20
IndexScanParams params;
@@ -666,7 +667,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Bar == 5. Index scan should be eof.
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -674,7 +675,7 @@ public:
params.bounds.endKey = BSON("" << 5);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
int count = 0;
int works = 0;
@@ -701,12 +702,12 @@ public:
class QueryStageAndHashProducesNothing : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -719,7 +720,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll);
// Foo >= 100
IndexScanParams params;
@@ -729,7 +730,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Bar <= 100
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -741,7 +742,7 @@ public:
<< "");
params.bounds.boundInclusion = BoundInclusion::kIncludeStartKeyOnly;
params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
ASSERT_EQUALS(0, countResults(ah.get()));
}
@@ -754,12 +755,12 @@ public:
class QueryStageAndHashFirstChildFetched : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -771,7 +772,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll);
// Foo <= 20
IndexScanParams params;
@@ -781,11 +782,11 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = -1;
- IndexScan* firstScan = new IndexScan(&_txn, params, &ws, NULL);
+ IndexScan* firstScan = new IndexScan(&_opCtx, params, &ws, NULL);
// First child of the AND_HASH stage is a Fetch. The NULL in the
// constructor means there is no filter.
- FetchStage* fetch = new FetchStage(&_txn, &ws, firstScan, NULL, coll);
+ FetchStage* fetch = new FetchStage(&_opCtx, &ws, firstScan, NULL, coll);
ah->addChild(fetch);
// Bar >= 10
@@ -794,7 +795,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Check that the AndHash stage returns docs {foo: 10, bar: 10}
// through {foo: 20, bar: 20}.
@@ -813,12 +814,12 @@ public:
class QueryStageAndHashSecondChildFetched : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -830,7 +831,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll);
// Foo <= 20
IndexScanParams params;
@@ -840,7 +841,7 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = -1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Bar >= 10
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -848,11 +849,11 @@ public:
params.bounds.endKey = BSONObj();
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- IndexScan* secondScan = new IndexScan(&_txn, params, &ws, NULL);
+ IndexScan* secondScan = new IndexScan(&_opCtx, params, &ws, NULL);
// Second child of the AND_HASH stage is a Fetch. The NULL in the
// constructor means there is no filter.
- FetchStage* fetch = new FetchStage(&_txn, &ws, secondScan, NULL, coll);
+ FetchStage* fetch = new FetchStage(&_opCtx, &ws, secondScan, NULL, coll);
ah->addChild(fetch);
// Check that the AndHash stage returns docs {foo: 10, bar: 10}
@@ -869,12 +870,12 @@ public:
class QueryStageAndHashDeadChild : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -885,9 +886,9 @@ public:
// Child2: NEED_TIME, DEAD
{
WorkingSet ws;
- const auto andHashStage = make_unique<AndHashStage>(&_txn, &ws, coll);
+ const auto andHashStage = make_unique<AndHashStage>(&_opCtx, &ws, coll);
- auto childStage1 = make_unique<QueuedDataStage>(&_txn, &ws);
+ auto childStage1 = make_unique<QueuedDataStage>(&_opCtx, &ws);
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
@@ -897,7 +898,7 @@ public:
childStage1->pushBack(id);
}
- auto childStage2 = make_unique<QueuedDataStage>(&_txn, &ws);
+ auto childStage2 = make_unique<QueuedDataStage>(&_opCtx, &ws);
childStage2->pushBack(PlanStage::NEED_TIME);
childStage2->pushBack(PlanStage::DEAD);
@@ -918,9 +919,9 @@ public:
// Child2: Data
{
WorkingSet ws;
- const auto andHashStage = make_unique<AndHashStage>(&_txn, &ws, coll);
+ const auto andHashStage = make_unique<AndHashStage>(&_opCtx, &ws, coll);
- auto childStage1 = make_unique<QueuedDataStage>(&_txn, &ws);
+ auto childStage1 = make_unique<QueuedDataStage>(&_opCtx, &ws);
{
WorkingSetID id = ws.allocate();
@@ -932,7 +933,7 @@ public:
}
childStage1->pushBack(PlanStage::DEAD);
- auto childStage2 = make_unique<QueuedDataStage>(&_txn, &ws);
+ auto childStage2 = make_unique<QueuedDataStage>(&_opCtx, &ws);
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
@@ -959,9 +960,9 @@ public:
// Child2: Data, DEAD
{
WorkingSet ws;
- const auto andHashStage = make_unique<AndHashStage>(&_txn, &ws, coll);
+ const auto andHashStage = make_unique<AndHashStage>(&_opCtx, &ws, coll);
- auto childStage1 = make_unique<QueuedDataStage>(&_txn, &ws);
+ auto childStage1 = make_unique<QueuedDataStage>(&_opCtx, &ws);
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
@@ -971,7 +972,7 @@ public:
childStage1->pushBack(id);
}
- auto childStage2 = make_unique<QueuedDataStage>(&_txn, &ws);
+ auto childStage2 = make_unique<QueuedDataStage>(&_opCtx, &ws);
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
@@ -1007,12 +1008,12 @@ public:
class QueryStageAndSortedInvalidation : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -1024,7 +1025,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = make_unique<AndSortedStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndSortedStage>(&_opCtx, &ws, coll);
// Scan over foo == 1
IndexScanParams params;
@@ -1034,11 +1035,11 @@ public:
params.bounds.endKey = BSON("" << 1);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Scan over bar == 1
params.descriptor = getIndex(BSON("bar" << 1), coll);
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Get the set of RecordIds in our collection to use later.
set<RecordId> data;
@@ -1056,8 +1057,8 @@ public:
// very first insert, which should be the very first thing in data. Let's invalidate it
// and make sure it shows up in the flagged results.
ah->saveState();
- ah->invalidate(&_txn, *data.begin(), INVALIDATION_DELETION);
- remove(coll->docFor(&_txn, *data.begin()).value());
+ ah->invalidate(&_opCtx, *data.begin(), INVALIDATION_DELETION);
+ remove(coll->docFor(&_opCtx, *data.begin()).value());
ah->restoreState();
// Make sure the nuked obj is actually in the flagged data.
@@ -1099,8 +1100,8 @@ public:
// Remove a result that's coming up. It's not the 'target' result of the AND so it's
// not flagged.
ah->saveState();
- ah->invalidate(&_txn, *it, INVALIDATION_DELETION);
- remove(coll->docFor(&_txn, *it).value());
+ ah->invalidate(&_opCtx, *it, INVALIDATION_DELETION);
+ remove(coll->docFor(&_opCtx, *it).value());
ah->restoreState();
// Get all results aside from the two we killed.
@@ -1131,12 +1132,12 @@ public:
class QueryStageAndSortedThreeLeaf : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -1157,7 +1158,7 @@ public:
addIndex(BSON("baz" << 1));
WorkingSet ws;
- auto ah = make_unique<AndSortedStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndSortedStage>(&_opCtx, &ws, coll);
// Scan over foo == 1
IndexScanParams params;
@@ -1167,15 +1168,15 @@ public:
params.bounds.endKey = BSON("" << 1);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// bar == 1
params.descriptor = getIndex(BSON("bar" << 1), coll);
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// baz == 1
params.descriptor = getIndex(BSON("baz" << 1), coll);
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
ASSERT_EQUALS(50, countResults(ah.get()));
}
@@ -1185,12 +1186,12 @@ public:
class QueryStageAndSortedWithNothing : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -1202,7 +1203,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = make_unique<AndSortedStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndSortedStage>(&_opCtx, &ws, coll);
// Foo == 7. Should be EOF.
IndexScanParams params;
@@ -1212,7 +1213,7 @@ public:
params.bounds.endKey = BSON("" << 7);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Bar == 20, not EOF.
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -1220,7 +1221,7 @@ public:
params.bounds.endKey = BSON("" << 20);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
ASSERT_EQUALS(0, countResults(ah.get()));
}
@@ -1230,12 +1231,12 @@ public:
class QueryStageAndSortedProducesNothing : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -1251,7 +1252,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = make_unique<AndSortedStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndSortedStage>(&_opCtx, &ws, coll);
// foo == 7.
IndexScanParams params;
@@ -1261,7 +1262,7 @@ public:
params.bounds.endKey = BSON("" << 7);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// bar == 20.
params.descriptor = getIndex(BSON("bar" << 1), coll);
@@ -1269,7 +1270,7 @@ public:
params.bounds.endKey = BSON("" << 20);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
ASSERT_EQUALS(0, countResults(ah.get()));
}
@@ -1279,12 +1280,12 @@ public:
class QueryStageAndSortedByLastChild : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -1296,7 +1297,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = make_unique<AndHashStage>(&_txn, &ws, coll);
+ auto ah = make_unique<AndHashStage>(&_opCtx, &ws, coll);
// Scan over foo == 1
IndexScanParams params;
@@ -1306,13 +1307,13 @@ public:
params.bounds.endKey = BSON("" << 1);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// Intersect with 7 <= bar < 10000
params.descriptor = getIndex(BSON("bar" << 1), coll);
params.bounds.startKey = BSON("" << 7);
params.bounds.endKey = BSON("" << 10000);
- ah->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
WorkingSetID lastId = WorkingSet::INVALID_ID;
@@ -1323,11 +1324,11 @@ public:
if (PlanStage::ADVANCED != status) {
continue;
}
- BSONObj thisObj = coll->docFor(&_txn, ws.get(id)->recordId).value();
+ BSONObj thisObj = coll->docFor(&_opCtx, ws.get(id)->recordId).value();
ASSERT_EQUALS(7 + count, thisObj["bar"].numberInt());
++count;
if (WorkingSet::INVALID_ID != lastId) {
- BSONObj lastObj = coll->docFor(&_txn, ws.get(lastId)->recordId).value();
+ BSONObj lastObj = coll->docFor(&_opCtx, ws.get(lastId)->recordId).value();
ASSERT_LESS_THAN(lastObj["bar"].woCompare(thisObj["bar"]), 0);
}
lastId = id;
@@ -1344,12 +1345,12 @@ public:
class QueryStageAndSortedFirstChildFetched : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -1362,7 +1363,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- unique_ptr<AndSortedStage> as = make_unique<AndSortedStage>(&_txn, &ws, coll);
+ unique_ptr<AndSortedStage> as = make_unique<AndSortedStage>(&_opCtx, &ws, coll);
// Scan over foo == 1
IndexScanParams params;
@@ -1372,16 +1373,16 @@ public:
params.bounds.endKey = BSON("" << 1);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- IndexScan* firstScan = new IndexScan(&_txn, params, &ws, NULL);
+ IndexScan* firstScan = new IndexScan(&_opCtx, params, &ws, NULL);
// First child of the AND_SORTED stage is a Fetch. The NULL in the
// constructor means there is no filter.
- FetchStage* fetch = new FetchStage(&_txn, &ws, firstScan, NULL, coll);
+ FetchStage* fetch = new FetchStage(&_opCtx, &ws, firstScan, NULL, coll);
as->addChild(fetch);
// bar == 1
params.descriptor = getIndex(BSON("bar" << 1), coll);
- as->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ as->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
for (int i = 0; i < 50; i++) {
BSONObj obj = getNext(as.get(), &ws);
@@ -1398,12 +1399,12 @@ public:
class QueryStageAndSortedSecondChildFetched : public QueryStageAndBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = ctx.getCollection();
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -1416,7 +1417,7 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- unique_ptr<AndSortedStage> as = make_unique<AndSortedStage>(&_txn, &ws, coll);
+ unique_ptr<AndSortedStage> as = make_unique<AndSortedStage>(&_opCtx, &ws, coll);
// Scan over foo == 1
IndexScanParams params;
@@ -1426,15 +1427,15 @@ public:
params.bounds.endKey = BSON("" << 1);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- as->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ as->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
// bar == 1
params.descriptor = getIndex(BSON("bar" << 1), coll);
- IndexScan* secondScan = new IndexScan(&_txn, params, &ws, NULL);
+ IndexScan* secondScan = new IndexScan(&_opCtx, params, &ws, NULL);
// Second child of the AND_SORTED stage is a Fetch. The NULL in the
// constructor means there is no filter.
- FetchStage* fetch = new FetchStage(&_txn, &ws, secondScan, NULL, coll);
+ FetchStage* fetch = new FetchStage(&_opCtx, &ws, secondScan, NULL, coll);
as->addChild(fetch);
for (int i = 0; i < 50; i++) {
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index c940e7c2baa..ed69f451117 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -63,7 +63,7 @@ public:
addIndex(BSON("a" << 1));
addIndex(BSON("b" << 1));
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
Collection* collection = ctx.getCollection();
ASSERT(collection);
@@ -74,38 +74,38 @@ public:
}
void addIndex(const BSONObj& obj) {
- ASSERT_OK(dbtests::createIndex(&_txn, nss.ns(), obj));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns(), obj));
}
void dropCollection() {
- ScopedTransaction transaction(&_txn, MODE_X);
- Lock::DBLock dbLock(_txn.lockState(), nss.db(), MODE_X);
- Database* database = dbHolder().get(&_txn, nss.db());
+ ScopedTransaction transaction(&_opCtx, MODE_X);
+ Lock::DBLock dbLock(_opCtx.lockState(), nss.db(), MODE_X);
+ Database* database = dbHolder().get(&_opCtx, nss.db());
if (!database) {
return;
}
- WriteUnitOfWork wuow(&_txn);
- database->dropCollection(&_txn, nss.ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ database->dropCollection(&_opCtx, nss.ns());
wuow.commit();
}
void insertDocument(Collection* collection, BSONObj obj) {
- WriteUnitOfWork wuow(&_txn);
+ WriteUnitOfWork wuow(&_opCtx);
const bool enforceQuota = false;
OpDebug* const nullOpDebug = nullptr;
- ASSERT_OK(collection->insertDocument(&_txn, obj, nullOpDebug, enforceQuota));
+ ASSERT_OK(collection->insertDocument(&_opCtx, obj, nullOpDebug, enforceQuota));
wuow.commit();
}
- OperationContext* txn() {
- return &_txn;
+ OperationContext* opCtx() {
+ return &_opCtx;
}
protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
WorkingSet _ws;
};
@@ -116,7 +116,7 @@ protected:
class QueryStageCachedPlanFailure : public QueryStageCachedPlanBase {
public:
void run() {
- AutoGetCollectionForRead ctx(&_txn, nss);
+ AutoGetCollectionForRead ctx(&_opCtx, nss);
Collection* collection = ctx.getCollection();
ASSERT(collection);
@@ -124,7 +124,7 @@ public:
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson("{a: {$gte: 8}, b: 1}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
const std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -136,20 +136,20 @@ public:
// Get planner params.
QueryPlannerParams plannerParams;
- fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams);
+ fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams);
// Queued data stage will return a failure during the cached plan trial period.
- auto mockChild = stdx::make_unique<QueuedDataStage>(&_txn, &_ws);
+ auto mockChild = stdx::make_unique<QueuedDataStage>(&_opCtx, &_ws);
mockChild->pushBack(PlanStage::FAILURE);
// High enough so that we shouldn't trigger a replan based on works.
const size_t decisionWorks = 50;
CachedPlanStage cachedPlanStage(
- &_txn, collection, &_ws, cq.get(), plannerParams, decisionWorks, mockChild.release());
+ &_opCtx, collection, &_ws, cq.get(), plannerParams, decisionWorks, mockChild.release());
// This should succeed after triggering a replan.
PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL,
- _txn.getServiceContext()->getFastClockSource());
+ _opCtx.getServiceContext()->getFastClockSource());
ASSERT_OK(cachedPlanStage.pickBestPlan(&yieldPolicy));
// Make sure that we get 2 legit results back.
@@ -184,7 +184,7 @@ public:
class QueryStageCachedPlanHitMaxWorks : public QueryStageCachedPlanBase {
public:
void run() {
- AutoGetCollectionForRead ctx(&_txn, nss);
+ AutoGetCollectionForRead ctx(&_opCtx, nss);
Collection* collection = ctx.getCollection();
ASSERT(collection);
@@ -192,7 +192,7 @@ public:
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(fromjson("{a: {$gte: 8}, b: 1}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
const std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -204,24 +204,24 @@ public:
// Get planner params.
QueryPlannerParams plannerParams;
- fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams);
+ fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams);
// Set up queued data stage to take a long time before returning EOF. Should be long
// enough to trigger a replan.
const size_t decisionWorks = 10;
const size_t mockWorks =
1U + static_cast<size_t>(internalQueryCacheEvictionRatio * decisionWorks);
- auto mockChild = stdx::make_unique<QueuedDataStage>(&_txn, &_ws);
+ auto mockChild = stdx::make_unique<QueuedDataStage>(&_opCtx, &_ws);
for (size_t i = 0; i < mockWorks; i++) {
mockChild->pushBack(PlanStage::NEED_TIME);
}
CachedPlanStage cachedPlanStage(
- &_txn, collection, &_ws, cq.get(), plannerParams, decisionWorks, mockChild.release());
+ &_opCtx, collection, &_ws, cq.get(), plannerParams, decisionWorks, mockChild.release());
// This should succeed after triggering a replan.
PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL,
- _txn.getServiceContext()->getFastClockSource());
+ _opCtx.getServiceContext()->getFastClockSource());
ASSERT_OK(cachedPlanStage.pickBestPlan(&yieldPolicy));
// Make sure that we get 2 legit results back.
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index c1dbe116da6..e74ba02f361 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -64,8 +64,8 @@ static const NamespaceString nss{"unittests.QueryStageCollectionScan"};
class QueryStageCollectionScanBase {
public:
- QueryStageCollectionScanBase() : _client(&_txn) {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ QueryStageCollectionScanBase() : _client(&_opCtx) {
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
for (int i = 0; i < numObj(); ++i) {
BSONObjBuilder bob;
@@ -75,7 +75,7 @@ public:
}
virtual ~QueryStageCollectionScanBase() {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
_client.dropCollection(nss.ns());
}
@@ -84,7 +84,7 @@ public:
}
int countResults(CollectionScanParams::Direction direction, const BSONObj& filterObj) {
- AutoGetCollectionForRead ctx(&_txn, nss);
+ AutoGetCollectionForRead ctx(&_opCtx, nss);
// Configure the scan.
CollectionScanParams params;
@@ -102,10 +102,10 @@ public:
// Make a scan and have the runner own it.
unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
unique_ptr<PlanStage> ps =
- make_unique<CollectionScan>(&_txn, params, ws.get(), filterExpr.get());
+ make_unique<CollectionScan>(&_opCtx, params, ws.get(), filterExpr.get());
auto statusWithPlanExecutor = PlanExecutor::make(
- &_txn, std::move(ws), std::move(ps), params.collection, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(ps), params.collection, PlanExecutor::YIELD_MANUAL);
ASSERT_OK(statusWithPlanExecutor.getStatus());
unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
@@ -129,7 +129,7 @@ public:
params.direction = direction;
params.tailable = false;
- unique_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
+ unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, &ws, NULL));
while (!scan->isEOF()) {
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = scan->work(&id);
@@ -147,7 +147,7 @@ public:
protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
private:
DBDirectClient _client;
@@ -206,7 +206,7 @@ public:
class QueryStageCollscanObjectsInOrderForward : public QueryStageCollectionScanBase {
public:
void run() {
- AutoGetCollectionForRead ctx(&_txn, nss);
+ AutoGetCollectionForRead ctx(&_opCtx, nss);
// Configure the scan.
CollectionScanParams params;
@@ -216,10 +216,10 @@ public:
// Make a scan and have the runner own it.
unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
- unique_ptr<PlanStage> ps = make_unique<CollectionScan>(&_txn, params, ws.get(), nullptr);
+ unique_ptr<PlanStage> ps = make_unique<CollectionScan>(&_opCtx, params, ws.get(), nullptr);
auto statusWithPlanExecutor = PlanExecutor::make(
- &_txn, std::move(ws), std::move(ps), params.collection, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(ps), params.collection, PlanExecutor::YIELD_MANUAL);
ASSERT_OK(statusWithPlanExecutor.getStatus());
unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
@@ -242,7 +242,7 @@ public:
class QueryStageCollscanObjectsInOrderBackward : public QueryStageCollectionScanBase {
public:
void run() {
- AutoGetCollectionForRead ctx(&_txn, nss);
+ AutoGetCollectionForRead ctx(&_opCtx, nss);
CollectionScanParams params;
params.collection = ctx.getCollection();
@@ -250,10 +250,10 @@ public:
params.tailable = false;
unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
- unique_ptr<PlanStage> ps = make_unique<CollectionScan>(&_txn, params, ws.get(), nullptr);
+ unique_ptr<PlanStage> ps = make_unique<CollectionScan>(&_opCtx, params, ws.get(), nullptr);
auto statusWithPlanExecutor = PlanExecutor::make(
- &_txn, std::move(ws), std::move(ps), params.collection, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(ps), params.collection, PlanExecutor::YIELD_MANUAL);
ASSERT_OK(statusWithPlanExecutor.getStatus());
unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
@@ -276,7 +276,7 @@ public:
class QueryStageCollscanInvalidateUpcomingObject : public QueryStageCollectionScanBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
Collection* coll = ctx.getCollection();
@@ -291,7 +291,7 @@ public:
params.tailable = false;
WorkingSet ws;
- unique_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
+ unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, &ws, NULL));
int count = 0;
while (count < 10) {
@@ -299,7 +299,7 @@ public:
PlanStage::StageState state = scan->work(&id);
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = ws.get(id);
- ASSERT_EQUALS(coll->docFor(&_txn, recordIds[count]).value()["foo"].numberInt(),
+ ASSERT_EQUALS(coll->docFor(&_opCtx, recordIds[count]).value()["foo"].numberInt(),
member->obj.value()["foo"].numberInt());
++count;
}
@@ -308,11 +308,11 @@ public:
// Remove recordIds[count].
scan->saveState();
{
- WriteUnitOfWork wunit(&_txn);
- scan->invalidate(&_txn, recordIds[count], INVALIDATION_DELETION);
+ WriteUnitOfWork wunit(&_opCtx);
+ scan->invalidate(&_opCtx, recordIds[count], INVALIDATION_DELETION);
wunit.commit(); // to avoid rollback of the invalidate
}
- remove(coll->docFor(&_txn, recordIds[count]).value());
+ remove(coll->docFor(&_opCtx, recordIds[count]).value());
scan->restoreState();
// Skip over recordIds[count].
@@ -324,7 +324,7 @@ public:
PlanStage::StageState state = scan->work(&id);
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = ws.get(id);
- ASSERT_EQUALS(coll->docFor(&_txn, recordIds[count]).value()["foo"].numberInt(),
+ ASSERT_EQUALS(coll->docFor(&_opCtx, recordIds[count]).value()["foo"].numberInt(),
member->obj.value()["foo"].numberInt());
++count;
}
@@ -342,7 +342,7 @@ public:
class QueryStageCollscanInvalidateUpcomingObjectBackward : public QueryStageCollectionScanBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
Collection* coll = ctx.getCollection();
// Get the RecordIds that would be returned by an in-order scan.
@@ -356,7 +356,7 @@ public:
params.tailable = false;
WorkingSet ws;
- unique_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
+ unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, &ws, NULL));
int count = 0;
while (count < 10) {
@@ -364,7 +364,7 @@ public:
PlanStage::StageState state = scan->work(&id);
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = ws.get(id);
- ASSERT_EQUALS(coll->docFor(&_txn, recordIds[count]).value()["foo"].numberInt(),
+ ASSERT_EQUALS(coll->docFor(&_opCtx, recordIds[count]).value()["foo"].numberInt(),
member->obj.value()["foo"].numberInt());
++count;
}
@@ -373,11 +373,11 @@ public:
// Remove recordIds[count].
scan->saveState();
{
- WriteUnitOfWork wunit(&_txn);
- scan->invalidate(&_txn, recordIds[count], INVALIDATION_DELETION);
+ WriteUnitOfWork wunit(&_opCtx);
+ scan->invalidate(&_opCtx, recordIds[count], INVALIDATION_DELETION);
wunit.commit(); // to avoid rollback of the invalidate
}
- remove(coll->docFor(&_txn, recordIds[count]).value());
+ remove(coll->docFor(&_opCtx, recordIds[count]).value());
scan->restoreState();
// Skip over recordIds[count].
@@ -389,7 +389,7 @@ public:
PlanStage::StageState state = scan->work(&id);
if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = ws.get(id);
- ASSERT_EQUALS(coll->docFor(&_txn, recordIds[count]).value()["foo"].numberInt(),
+ ASSERT_EQUALS(coll->docFor(&_opCtx, recordIds[count]).value()["foo"].numberInt(),
member->obj.value()["foo"].numberInt());
++count;
}
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index ccc08cab5a5..6a851dc9258 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -54,9 +54,9 @@ const int kInterjections = kDocuments;
class CountStageTest {
public:
CountStageTest()
- : _scopedXact(&_txn, MODE_IX),
- _dbLock(_txn.lockState(), nsToDatabaseSubstring(ns()), MODE_X),
- _ctx(&_txn, ns()),
+ : _scopedXact(&_opCtx, MODE_IX),
+ _dbLock(_opCtx.lockState(), nsToDatabaseSubstring(ns()), MODE_X),
+ _ctx(&_opCtx, ns()),
_coll(NULL) {}
virtual ~CountStageTest() {}
@@ -64,12 +64,12 @@ public:
virtual void interject(CountStage&, int) {}
virtual void setup() {
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
- _ctx.db()->dropCollection(&_txn, ns());
- _coll = _ctx.db()->createCollection(&_txn, ns());
+ _ctx.db()->dropCollection(&_opCtx, ns());
+ _coll = _ctx.db()->createCollection(&_opCtx, ns());
- _coll->getIndexCatalog()->createIndexOnEmptyCollection(&_txn,
+ _coll->getIndexCatalog()->createIndexOnEmptyCollection(&_opCtx,
BSON("key" << BSON("x" << 1)
<< "name"
<< "x_1"
@@ -94,7 +94,7 @@ public:
params.direction = CollectionScanParams::FORWARD;
params.tailable = false;
- unique_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
+ unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, &ws, NULL));
while (!scan->isEOF()) {
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = scan->work(&id);
@@ -107,27 +107,27 @@ public:
}
void insert(const BSONObj& doc) {
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
OpDebug* const nullOpDebug = nullptr;
- _coll->insertDocument(&_txn, doc, nullOpDebug, false);
+ _coll->insertDocument(&_opCtx, doc, nullOpDebug, false);
wunit.commit();
}
void remove(const RecordId& recordId) {
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
OpDebug* const nullOpDebug = nullptr;
- _coll->deleteDocument(&_txn, recordId, nullOpDebug);
+ _coll->deleteDocument(&_opCtx, recordId, nullOpDebug);
wunit.commit();
}
void update(const RecordId& oldrecordId, const BSONObj& newDoc) {
- WriteUnitOfWork wunit(&_txn);
- BSONObj oldDoc = _coll->getRecordStore()->dataFor(&_txn, oldrecordId).releaseToBson();
+ WriteUnitOfWork wunit(&_opCtx);
+ BSONObj oldDoc = _coll->getRecordStore()->dataFor(&_opCtx, oldrecordId).releaseToBson();
OplogUpdateEntryArgs args;
args.ns = _coll->ns().ns();
- _coll->updateDocument(&_txn,
+ _coll->updateDocument(&_opCtx,
oldrecordId,
- Snapshotted<BSONObj>(_txn.recoveryUnit()->getSnapshotId(), oldDoc),
+ Snapshotted<BSONObj>(_opCtx.recoveryUnit()->getSnapshotId(), oldDoc),
newDoc,
false,
true,
@@ -163,7 +163,7 @@ public:
const bool useRecordStoreCount = false;
CountStageParams params(request, useRecordStoreCount);
- CountStage countStage(&_txn, _coll, std::move(params), ws.get(), scan);
+ CountStage countStage(&_opCtx, _coll, std::move(params), ws.get(), scan);
const CountStats* stats = runCount(countStage);
@@ -202,7 +202,7 @@ public:
IndexScan* createIndexScan(MatchExpression* expr, WorkingSet* ws) {
IndexCatalog* catalog = _coll->getIndexCatalog();
std::vector<IndexDescriptor*> indexes;
- catalog->findIndexesByKeyPattern(&_txn, BSON("x" << 1), false, &indexes);
+ catalog->findIndexesByKeyPattern(&_opCtx, BSON("x" << 1), false, &indexes);
ASSERT_EQ(indexes.size(), 1U);
IndexDescriptor* descriptor = indexes[0];
@@ -216,7 +216,7 @@ public:
params.direction = 1;
// This child stage gets owned and freed by its parent CountStage
- return new IndexScan(&_txn, params, ws, expr);
+ return new IndexScan(&_opCtx, params, ws, expr);
}
CollectionScan* createCollScan(MatchExpression* expr, WorkingSet* ws) {
@@ -224,7 +224,7 @@ public:
params.collection = _coll;
// This child stage gets owned and freed by its parent CountStage
- return new CollectionScan(&_txn, params, ws, expr);
+ return new CollectionScan(&_opCtx, params, ws, expr);
}
static const char* ns() {
@@ -234,7 +234,7 @@ public:
protected:
vector<RecordId> _recordIds;
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
ScopedTransaction _scopedXact;
Lock::DBLock _dbLock;
OldClientContext _ctx;
@@ -306,11 +306,11 @@ public:
if (interjection == 0) {
// At this point, our first interjection, we've counted _recordIds[0]
// and are about to count _recordIds[1]
- WriteUnitOfWork wunit(&_txn);
- count_stage.invalidate(&_txn, _recordIds[interjection], INVALIDATION_DELETION);
+ WriteUnitOfWork wunit(&_opCtx);
+ count_stage.invalidate(&_opCtx, _recordIds[interjection], INVALIDATION_DELETION);
remove(_recordIds[interjection]);
- count_stage.invalidate(&_txn, _recordIds[interjection + 1], INVALIDATION_DELETION);
+ count_stage.invalidate(&_opCtx, _recordIds[interjection + 1], INVALIDATION_DELETION);
remove(_recordIds[interjection + 1]);
wunit.commit();
}
@@ -331,12 +331,12 @@ public:
// At the point which this is called we are in between the first and second record
void interject(CountStage& count_stage, int interjection) {
if (interjection == 0) {
- count_stage.invalidate(&_txn, _recordIds[0], INVALIDATION_MUTATION);
- OID id1 = _coll->docFor(&_txn, _recordIds[0]).value().getField("_id").OID();
+ count_stage.invalidate(&_opCtx, _recordIds[0], INVALIDATION_MUTATION);
+ OID id1 = _coll->docFor(&_opCtx, _recordIds[0]).value().getField("_id").OID();
update(_recordIds[0], BSON("_id" << id1 << "x" << 100));
- count_stage.invalidate(&_txn, _recordIds[1], INVALIDATION_MUTATION);
- OID id2 = _coll->docFor(&_txn, _recordIds[1]).value().getField("_id").OID();
+ count_stage.invalidate(&_opCtx, _recordIds[1], INVALIDATION_MUTATION);
+ OID id2 = _coll->docFor(&_opCtx, _recordIds[1]).value().getField("_id").OID();
update(_recordIds[1], BSON("_id" << id2 << "x" << 100));
}
}
diff --git a/src/mongo/dbtests/query_stage_count_scan.cpp b/src/mongo/dbtests/query_stage_count_scan.cpp
index 9d2e82fa118..87d2b87b283 100644
--- a/src/mongo/dbtests/query_stage_count_scan.cpp
+++ b/src/mongo/dbtests/query_stage_count_scan.cpp
@@ -52,15 +52,15 @@ using std::shared_ptr;
class CountBase {
public:
- CountBase() : _client(&_txn) {}
+ CountBase() : _client(&_opCtx) {}
virtual ~CountBase() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
_client.dropCollection(ns());
}
void addIndex(const BSONObj& obj) {
- ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), obj));
}
void insert(const BSONObj& obj) {
@@ -93,7 +93,7 @@ public:
IndexDescriptor* getIndex(Database* db, const BSONObj& obj) {
Collection* collection = db->getCollection(ns());
std::vector<IndexDescriptor*> indexes;
- collection->getIndexCatalog()->findIndexesByKeyPattern(&_txn, obj, false, &indexes);
+ collection->getIndexCatalog()->findIndexesByKeyPattern(&_opCtx, obj, false, &indexes);
return indexes.empty() ? nullptr : indexes[0];
}
@@ -103,7 +103,7 @@ public:
protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
private:
DBDirectClient _client;
@@ -116,7 +116,7 @@ private:
class QueryStageCountScanDups : public CountBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
// Insert some docs
insert(BSON("a" << BSON_ARRAY(5 << 7)));
@@ -135,7 +135,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_txn, params, &ws);
+ CountScan count(&_opCtx, params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(2, numCounted);
@@ -148,7 +148,7 @@ public:
class QueryStageCountScanInclusiveBounds : public CountBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
// Insert some docs
for (int i = 0; i < 10; ++i) {
@@ -167,7 +167,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_txn, params, &ws);
+ CountScan count(&_opCtx, params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(5, numCounted);
@@ -180,7 +180,7 @@ public:
class QueryStageCountScanExclusiveBounds : public CountBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
// Insert some docs
for (int i = 0; i < 10; ++i) {
@@ -199,7 +199,7 @@ public:
params.endKeyInclusive = false;
WorkingSet ws;
- CountScan count(&_txn, params, &ws);
+ CountScan count(&_opCtx, params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(3, numCounted);
@@ -212,7 +212,7 @@ public:
class QueryStageCountScanLowerBound : public CountBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
// Insert doc, add index
insert(BSON("a" << 2));
@@ -227,7 +227,7 @@ public:
params.endKeyInclusive = false;
WorkingSet ws;
- CountScan count(&_txn, params, &ws);
+ CountScan count(&_opCtx, params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(0, numCounted);
@@ -240,7 +240,7 @@ public:
class QueryStageCountScanNothingInInterval : public CountBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
// Insert documents, add index
insert(BSON("a" << 2));
@@ -256,7 +256,7 @@ public:
params.endKeyInclusive = false;
WorkingSet ws;
- CountScan count(&_txn, params, &ws);
+ CountScan count(&_opCtx, params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(0, numCounted);
@@ -270,7 +270,7 @@ public:
class QueryStageCountScanNothingInIntervalFirstMatchTooHigh : public CountBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
// Insert some documents, add index
insert(BSON("a" << 2));
@@ -286,7 +286,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_txn, params, &ws);
+ CountScan count(&_opCtx, params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(0, numCounted);
@@ -300,7 +300,7 @@ public:
class QueryStageCountScanNoChangeDuringYield : public CountBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
// Insert documents, add index
for (int i = 0; i < 10; ++i) {
@@ -317,7 +317,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_txn, params, &ws);
+ CountScan count(&_opCtx, params, &ws);
WorkingSetID wsid;
int numCounted = 0;
@@ -353,7 +353,7 @@ public:
class QueryStageCountScanDeleteDuringYield : public CountBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
// Insert documents, add index
for (int i = 0; i < 10; ++i) {
@@ -370,7 +370,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_txn, params, &ws);
+ CountScan count(&_opCtx, params, &ws);
WorkingSetID wsid;
int numCounted = 0;
@@ -409,7 +409,7 @@ public:
class QueryStageCountScanInsertNewDocsDuringYield : public CountBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
// Insert documents, add index
for (int i = 0; i < 10; ++i) {
@@ -426,7 +426,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_txn, params, &ws);
+ CountScan count(&_opCtx, params, &ws);
WorkingSetID wsid;
int numCounted = 0;
@@ -468,7 +468,7 @@ public:
class QueryStageCountScanBecomesMultiKeyDuringYield : public CountBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
// Insert documents, add index
for (int i = 0; i < 10; ++i) {
@@ -485,7 +485,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_txn, params, &ws);
+ CountScan count(&_opCtx, params, &ws);
WorkingSetID wsid;
int numCounted = 0;
@@ -523,7 +523,7 @@ public:
class QueryStageCountScanUnusedKeys : public CountBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
// Insert docs, add index
for (int i = 0; i < 10; ++i) {
@@ -545,7 +545,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_txn, params, &ws);
+ CountScan count(&_opCtx, params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(7, numCounted);
@@ -558,7 +558,7 @@ public:
class QueryStageCountScanUnusedEndKey : public CountBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
// Insert docs, add index
for (int i = 0; i < 10; ++i) {
@@ -578,7 +578,7 @@ public:
params.endKeyInclusive = true; // yes?
WorkingSet ws;
- CountScan count(&_txn, params, &ws);
+ CountScan count(&_opCtx, params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(9, numCounted);
@@ -591,7 +591,7 @@ public:
class QueryStageCountScanKeyBecomesUnusedDuringYield : public CountBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
// Insert documents, add index
for (int i = 0; i < 10; ++i) {
@@ -608,7 +608,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_txn, params, &ws);
+ CountScan count(&_opCtx, params, &ws);
WorkingSetID wsid;
int numCounted = 0;
diff --git a/src/mongo/dbtests/query_stage_delete.cpp b/src/mongo/dbtests/query_stage_delete.cpp
index b487bc2c655..b3892c41ffa 100644
--- a/src/mongo/dbtests/query_stage_delete.cpp
+++ b/src/mongo/dbtests/query_stage_delete.cpp
@@ -61,8 +61,8 @@ static const NamespaceString nss("unittests.QueryStageDelete");
class QueryStageDeleteBase {
public:
- QueryStageDeleteBase() : _client(&_txn) {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ QueryStageDeleteBase() : _client(&_opCtx) {
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
for (size_t i = 0; i < numObj(); ++i) {
BSONObjBuilder bob;
@@ -73,7 +73,7 @@ public:
}
virtual ~QueryStageDeleteBase() {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
_client.dropCollection(nss.ns());
}
@@ -91,7 +91,7 @@ public:
params.direction = direction;
params.tailable = false;
- unique_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
+ unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, &ws, NULL));
while (!scan->isEOF()) {
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = scan->work(&id);
@@ -107,7 +107,7 @@ public:
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(query);
auto statusWithCQ = CanonicalQuery::canonicalize(
- &_txn, std::move(qr), ExtensionsCallbackDisallowExtensions());
+ &_opCtx, std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
@@ -118,7 +118,7 @@ public:
protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
private:
DBDirectClient _client;
@@ -132,7 +132,7 @@ private:
class QueryStageDeleteInvalidateUpcomingObject : public QueryStageDeleteBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
Collection* coll = ctx.getCollection();
@@ -151,11 +151,11 @@ public:
deleteStageParams.isMulti = true;
WorkingSet ws;
- DeleteStage deleteStage(&_txn,
+ DeleteStage deleteStage(&_opCtx,
deleteStageParams,
&ws,
coll,
- new CollectionScan(&_txn, collScanParams, &ws, NULL));
+ new CollectionScan(&_opCtx, collScanParams, &ws, NULL));
const DeleteStats* stats = static_cast<const DeleteStats*>(deleteStage.getSpecificStats());
@@ -170,11 +170,11 @@ public:
// Remove recordIds[targetDocIndex];
deleteStage.saveState();
{
- WriteUnitOfWork wunit(&_txn);
- deleteStage.invalidate(&_txn, recordIds[targetDocIndex], INVALIDATION_DELETION);
+ WriteUnitOfWork wunit(&_opCtx);
+ deleteStage.invalidate(&_opCtx, recordIds[targetDocIndex], INVALIDATION_DELETION);
wunit.commit();
}
- BSONObj targetDoc = coll->docFor(&_txn, recordIds[targetDocIndex]).value();
+ BSONObj targetDoc = coll->docFor(&_opCtx, recordIds[targetDocIndex]).value();
ASSERT(!targetDoc.isEmpty());
remove(targetDoc);
deleteStage.restoreState();
@@ -198,7 +198,7 @@ class QueryStageDeleteReturnOldDoc : public QueryStageDeleteBase {
public:
void run() {
// Various variables we'll need.
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
Collection* coll = ctx.getCollection();
const int targetDocIndex = 0;
const BSONObj query = BSON("foo" << BSON("$gte" << targetDocIndex));
@@ -211,7 +211,7 @@ public:
// Configure a QueuedDataStage to pass the first object in the collection back in a
// RID_AND_OBJ state.
- auto qds = make_unique<QueuedDataStage>(&_txn, ws.get());
+ auto qds = make_unique<QueuedDataStage>(&_opCtx, ws.get());
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
member->recordId = recordIds[targetDocIndex];
@@ -226,7 +226,7 @@ public:
deleteParams.canonicalQuery = cq.get();
const auto deleteStage =
- make_unique<DeleteStage>(&_txn, deleteParams, ws.get(), coll, qds.release());
+ make_unique<DeleteStage>(&_opCtx, deleteParams, ws.get(), coll, qds.release());
const DeleteStats* stats = static_cast<const DeleteStats*>(deleteStage->getSpecificStats());
@@ -267,14 +267,14 @@ class QueryStageDeleteSkipOwnedObjects : public QueryStageDeleteBase {
public:
void run() {
// Various variables we'll need.
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
Collection* coll = ctx.getCollection();
const BSONObj query = BSONObj();
const auto ws = make_unique<WorkingSet>();
const unique_ptr<CanonicalQuery> cq(canonicalize(query));
// Configure a QueuedDataStage to pass an OWNED_OBJ to the delete stage.
- auto qds = make_unique<QueuedDataStage>(&_txn, ws.get());
+ auto qds = make_unique<QueuedDataStage>(&_opCtx, ws.get());
{
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
@@ -289,7 +289,7 @@ public:
deleteParams.canonicalQuery = cq.get();
const auto deleteStage =
- make_unique<DeleteStage>(&_txn, deleteParams, ws.get(), coll, qds.release());
+ make_unique<DeleteStage>(&_opCtx, deleteParams, ws.get(), coll, qds.release());
const DeleteStats* stats = static_cast<const DeleteStats*>(deleteStage->getSpecificStats());
// Call work, passing the set up member to the delete stage.
diff --git a/src/mongo/dbtests/query_stage_distinct.cpp b/src/mongo/dbtests/query_stage_distinct.cpp
index d0bab154208..f5b2ac7bf48 100644
--- a/src/mongo/dbtests/query_stage_distinct.cpp
+++ b/src/mongo/dbtests/query_stage_distinct.cpp
@@ -52,14 +52,14 @@ static const NamespaceString nss{"unittests.QueryStageDistinct"};
class DistinctBase {
public:
- DistinctBase() : _client(&_txn) {}
+ DistinctBase() : _client(&_opCtx) {}
virtual ~DistinctBase() {
_client.dropCollection(nss.ns());
}
void addIndex(const BSONObj& obj) {
- ASSERT_OK(dbtests::createIndex(&_txn, nss.ns(), obj));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns(), obj));
}
void insert(const BSONObj& obj) {
@@ -95,7 +95,7 @@ public:
protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
private:
DBDirectClient _client;
@@ -121,12 +121,12 @@ public:
// Make an index on a:1
addIndex(BSON("a" << 1));
- AutoGetCollectionForRead ctx(&_txn, nss);
+ AutoGetCollectionForRead ctx(&_opCtx, nss);
Collection* coll = ctx.getCollection();
// Set up the distinct stage.
std::vector<IndexDescriptor*> indexes;
- coll->getIndexCatalog()->findIndexesByKeyPattern(&_txn, BSON("a" << 1), false, &indexes);
+ coll->getIndexCatalog()->findIndexesByKeyPattern(&_opCtx, BSON("a" << 1), false, &indexes);
ASSERT_EQ(indexes.size(), 1U);
DistinctParams params;
@@ -141,7 +141,7 @@ public:
params.bounds.fields.push_back(oil);
WorkingSet ws;
- DistinctScan distinct(&_txn, params, &ws);
+ DistinctScan distinct(&_opCtx, params, &ws);
WorkingSetID wsid;
// Get our first result.
@@ -188,17 +188,17 @@ public:
// Make an index on a:1
addIndex(BSON("a" << 1));
- AutoGetCollectionForRead ctx(&_txn, nss);
+ AutoGetCollectionForRead ctx(&_opCtx, nss);
Collection* coll = ctx.getCollection();
// Set up the distinct stage.
std::vector<IndexDescriptor*> indexes;
- coll->getIndexCatalog()->findIndexesByKeyPattern(&_txn, BSON("a" << 1), false, &indexes);
+ coll->getIndexCatalog()->findIndexesByKeyPattern(&_opCtx, BSON("a" << 1), false, &indexes);
verify(indexes.size() == 1);
DistinctParams params;
params.descriptor = indexes[0];
- ASSERT_TRUE(params.descriptor->isMultikey(&_txn));
+ ASSERT_TRUE(params.descriptor->isMultikey(&_opCtx));
verify(params.descriptor);
params.direction = 1;
@@ -211,7 +211,7 @@ public:
params.bounds.fields.push_back(oil);
WorkingSet ws;
- DistinctScan distinct(&_txn, params, &ws);
+ DistinctScan distinct(&_opCtx, params, &ws);
// We should see each number in the range [1, 6] exactly once.
std::set<int> seen;
@@ -257,12 +257,12 @@ public:
addIndex(BSON("a" << 1 << "b" << 1));
- AutoGetCollectionForRead ctx(&_txn, nss);
+ AutoGetCollectionForRead ctx(&_opCtx, nss);
Collection* coll = ctx.getCollection();
std::vector<IndexDescriptor*> indices;
coll->getIndexCatalog()->findIndexesByKeyPattern(
- &_txn, BSON("a" << 1 << "b" << 1), false, &indices);
+ &_opCtx, BSON("a" << 1 << "b" << 1), false, &indices);
ASSERT_EQ(1U, indices.size());
DistinctParams params;
@@ -282,7 +282,7 @@ public:
params.bounds.fields.push_back(bOil);
WorkingSet ws;
- DistinctScan distinct(&_txn, params, &ws);
+ DistinctScan distinct(&_opCtx, params, &ws);
WorkingSetID wsid;
PlanStage::StageState state;
diff --git a/src/mongo/dbtests/query_stage_ensure_sorted.cpp b/src/mongo/dbtests/query_stage_ensure_sorted.cpp
index 9e930375317..982da6ad70a 100644
--- a/src/mongo/dbtests/query_stage_ensure_sorted.cpp
+++ b/src/mongo/dbtests/query_stage_ensure_sorted.cpp
@@ -56,10 +56,10 @@ public:
const char* inputStr,
const char* expectedStr,
CollatorInterface* collator = nullptr) {
- auto txn = _serviceContext.makeOperationContext();
+ auto opCtx = _serviceContext.makeOperationContext();
WorkingSet ws;
- auto queuedDataStage = stdx::make_unique<QueuedDataStage>(txn.get(), &ws);
+ auto queuedDataStage = stdx::make_unique<QueuedDataStage>(opCtx.get(), &ws);
BSONObj inputObj = fromjson(inputStr);
BSONElement inputElt = inputObj["input"];
ASSERT(inputElt.isABSONObj());
@@ -79,8 +79,8 @@ public:
// Initialization.
BSONObj pattern = fromjson(patternStr);
auto sortKeyGen = stdx::make_unique<SortKeyGeneratorStage>(
- txn.get(), queuedDataStage.release(), &ws, pattern, BSONObj(), collator);
- EnsureSortedStage ess(txn.get(), pattern, &ws, sortKeyGen.release());
+ opCtx.get(), queuedDataStage.release(), &ws, pattern, BSONObj(), collator);
+ EnsureSortedStage ess(opCtx.get(), pattern, &ws, sortKeyGen.release());
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = PlanStage::NEED_TIME;
@@ -112,13 +112,13 @@ protected:
};
TEST_F(QueryStageEnsureSortedTest, EnsureSortedEmptyWorkingSet) {
- auto txn = _serviceContext.makeOperationContext();
+ auto opCtx = _serviceContext.makeOperationContext();
WorkingSet ws;
- auto queuedDataStage = stdx::make_unique<QueuedDataStage>(txn.get(), &ws);
+ auto queuedDataStage = stdx::make_unique<QueuedDataStage>(opCtx.get(), &ws);
auto sortKeyGen = stdx::make_unique<SortKeyGeneratorStage>(
- txn.get(), queuedDataStage.release(), &ws, BSONObj(), BSONObj(), nullptr);
- EnsureSortedStage ess(txn.get(), BSONObj(), &ws, sortKeyGen.release());
+ opCtx.get(), queuedDataStage.release(), &ws, BSONObj(), BSONObj(), nullptr);
+ EnsureSortedStage ess(opCtx.get(), BSONObj(), &ws, sortKeyGen.release());
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = PlanStage::NEED_TIME;
diff --git a/src/mongo/dbtests/query_stage_fetch.cpp b/src/mongo/dbtests/query_stage_fetch.cpp
index fef93098606..709e2d1b760 100644
--- a/src/mongo/dbtests/query_stage_fetch.cpp
+++ b/src/mongo/dbtests/query_stage_fetch.cpp
@@ -56,14 +56,14 @@ using stdx::make_unique;
class QueryStageFetchBase {
public:
- QueryStageFetchBase() : _client(&_txn) {}
+ QueryStageFetchBase() : _client(&_opCtx) {}
virtual ~QueryStageFetchBase() {
_client.dropCollection(ns());
}
void getRecordIds(set<RecordId>* out, Collection* coll) {
- auto cursor = coll->getCursor(&_txn);
+ auto cursor = coll->getCursor(&_opCtx);
while (auto record = cursor->next()) {
out->insert(record->id);
}
@@ -83,7 +83,7 @@ public:
protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
DBDirectClient _client;
};
@@ -94,12 +94,12 @@ protected:
class FetchStageAlreadyFetched : public QueryStageFetchBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -112,14 +112,14 @@ public:
ASSERT_EQUALS(size_t(1), recordIds.size());
// Create a mock stage that returns the WSM.
- auto mockStage = make_unique<QueuedDataStage>(&_txn, &ws);
+ auto mockStage = make_unique<QueuedDataStage>(&_opCtx, &ws);
// Mock data.
{
WorkingSetID id = ws.allocate();
WorkingSetMember* mockMember = ws.get(id);
mockMember->recordId = *recordIds.begin();
- mockMember->obj = coll->docFor(&_txn, mockMember->recordId);
+ mockMember->obj = coll->docFor(&_opCtx, mockMember->recordId);
ws.transitionToRecordIdAndObj(id);
// Points into our DB.
mockStage->pushBack(id);
@@ -135,7 +135,7 @@ public:
}
unique_ptr<FetchStage> fetchStage(
- new FetchStage(&_txn, &ws, mockStage.release(), NULL, coll));
+ new FetchStage(&_opCtx, &ws, mockStage.release(), NULL, coll));
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state;
@@ -158,14 +158,14 @@ public:
class FetchStageFilter : public QueryStageFetchBase {
public:
void run() {
- ScopedTransaction transaction(&_txn, MODE_IX);
- Lock::DBLock lk(_txn.lockState(), nsToDatabaseSubstring(ns()), MODE_X);
- OldClientContext ctx(&_txn, ns());
+ ScopedTransaction transaction(&_opCtx, MODE_IX);
+ Lock::DBLock lk(_opCtx.lockState(), nsToDatabaseSubstring(ns()), MODE_X);
+ OldClientContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -178,7 +178,7 @@ public:
ASSERT_EQUALS(size_t(1), recordIds.size());
// Create a mock stage that returns the WSM.
- auto mockStage = make_unique<QueuedDataStage>(&_txn, &ws);
+ auto mockStage = make_unique<QueuedDataStage>(&_opCtx, &ws);
// Mock data.
{
@@ -203,7 +203,7 @@ public:
// Matcher requires that foo==6 but we only have data with foo==5.
unique_ptr<FetchStage> fetchStage(
- new FetchStage(&_txn, &ws, mockStage.release(), filterExpr.get(), coll));
+ new FetchStage(&_opCtx, &ws, mockStage.release(), filterExpr.get(), coll));
// First call should return a fetch request as it's not in memory.
WorkingSetID id = WorkingSet::INVALID_ID;
diff --git a/src/mongo/dbtests/query_stage_ixscan.cpp b/src/mongo/dbtests/query_stage_ixscan.cpp
index 45ee7723ecf..7ee19d8f352 100644
--- a/src/mongo/dbtests/query_stage_ixscan.cpp
+++ b/src/mongo/dbtests/query_stage_ixscan.cpp
@@ -46,21 +46,21 @@ const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2;
class IndexScanTest {
public:
IndexScanTest()
- : _scopedXact(&_txn, MODE_IX),
- _dbLock(_txn.lockState(), nsToDatabaseSubstring(ns()), MODE_X),
- _ctx(&_txn, ns()),
+ : _scopedXact(&_opCtx, MODE_IX),
+ _dbLock(_opCtx.lockState(), nsToDatabaseSubstring(ns()), MODE_X),
+ _ctx(&_opCtx, ns()),
_coll(NULL) {}
virtual ~IndexScanTest() {}
virtual void setup() {
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
- _ctx.db()->dropCollection(&_txn, ns());
- _coll = _ctx.db()->createCollection(&_txn, ns());
+ _ctx.db()->dropCollection(&_opCtx, ns());
+ _coll = _ctx.db()->createCollection(&_opCtx, ns());
ASSERT_OK(_coll->getIndexCatalog()->createIndexOnEmptyCollection(
- &_txn,
+ &_opCtx,
BSON("ns" << ns() << "key" << BSON("x" << 1) << "name"
<< DBClientBase::genIndexName(BSON("x" << 1))
<< "v"
@@ -70,9 +70,9 @@ public:
}
void insert(const BSONObj& doc) {
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
OpDebug* const nullOpDebug = nullptr;
- ASSERT_OK(_coll->insertDocument(&_txn, doc, nullOpDebug, false));
+ ASSERT_OK(_coll->insertDocument(&_opCtx, doc, nullOpDebug, false));
wunit.commit();
}
@@ -100,7 +100,7 @@ public:
IndexScan* createIndexScanSimpleRange(BSONObj startKey, BSONObj endKey) {
IndexCatalog* catalog = _coll->getIndexCatalog();
std::vector<IndexDescriptor*> indexes;
- catalog->findIndexesByKeyPattern(&_txn, BSON("x" << 1), false, &indexes);
+ catalog->findIndexesByKeyPattern(&_opCtx, BSON("x" << 1), false, &indexes);
ASSERT_EQ(indexes.size(), 1U);
// We are not testing indexing here so use maximal bounds
@@ -114,7 +114,7 @@ public:
// This child stage gets owned and freed by the caller.
MatchExpression* filter = NULL;
- return new IndexScan(&_txn, params, &_ws, filter);
+ return new IndexScan(&_opCtx, params, &_ws, filter);
}
IndexScan* createIndexScan(BSONObj startKey,
@@ -124,7 +124,7 @@ public:
int direction = 1) {
IndexCatalog* catalog = _coll->getIndexCatalog();
std::vector<IndexDescriptor*> indexes;
- catalog->findIndexesByKeyPattern(&_txn, BSON("x" << 1), false, &indexes);
+ catalog->findIndexesByKeyPattern(&_opCtx, BSON("x" << 1), false, &indexes);
ASSERT_EQ(indexes.size(), 1U);
IndexScanParams params;
@@ -139,7 +139,7 @@ public:
params.bounds.fields.push_back(oil);
MatchExpression* filter = NULL;
- return new IndexScan(&_txn, params, &_ws, filter);
+ return new IndexScan(&_opCtx, params, &_ws, filter);
}
static const char* ns() {
@@ -148,7 +148,7 @@ public:
protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
ScopedTransaction _scopedXact;
Lock::DBLock _dbLock;
diff --git a/src/mongo/dbtests/query_stage_keep.cpp b/src/mongo/dbtests/query_stage_keep.cpp
index 782cc0eaf91..c5a57396987 100644
--- a/src/mongo/dbtests/query_stage_keep.cpp
+++ b/src/mongo/dbtests/query_stage_keep.cpp
@@ -60,14 +60,14 @@ using stdx::make_unique;
class QueryStageKeepBase {
public:
- QueryStageKeepBase() : _client(&_txn) {}
+ QueryStageKeepBase() : _client(&_opCtx) {}
virtual ~QueryStageKeepBase() {
_client.dropCollection(ns());
}
void getLocs(set<RecordId>* out, Collection* coll) {
- auto cursor = coll->getCursor(&_txn);
+ auto cursor = coll->getCursor(&_opCtx);
while (auto record = cursor->next()) {
out->insert(record->id);
}
@@ -98,7 +98,7 @@ public:
protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
DBDirectClient _client;
};
@@ -111,12 +111,12 @@ protected:
class KeepStageBasic : public QueryStageKeepBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -142,12 +142,12 @@ public:
params.direction = CollectionScanParams::FORWARD;
params.tailable = false;
params.start = RecordId();
- CollectionScan* cs = new CollectionScan(&_txn, params, &ws, NULL);
+ CollectionScan* cs = new CollectionScan(&_opCtx, params, &ws, NULL);
// Create a KeepMutations stage to merge in the 10 flagged objects.
// Takes ownership of 'cs'
MatchExpression* nullFilter = NULL;
- auto keep = make_unique<KeepMutationsStage>(&_txn, nullFilter, &ws, cs);
+ auto keep = make_unique<KeepMutationsStage>(&_opCtx, nullFilter, &ws, cs);
for (size_t i = 0; i < 10; ++i) {
WorkingSetID id = getNextResult(keep.get());
@@ -178,13 +178,13 @@ public:
class KeepStageFlagAdditionalAfterStreamingStarts : public QueryStageKeepBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
WorkingSet ws;
@@ -195,7 +195,8 @@ public:
// Create a KeepMutationsStage with an EOF child, and flag 50 objects. We expect these
// objects to be returned by the KeepMutationsStage.
MatchExpression* nullFilter = NULL;
- auto keep = make_unique<KeepMutationsStage>(&_txn, nullFilter, &ws, new EOFStage(&_txn));
+ auto keep =
+ make_unique<KeepMutationsStage>(&_opCtx, nullFilter, &ws, new EOFStage(&_opCtx));
for (size_t i = 0; i < 50; ++i) {
WorkingSetID id = ws.allocate();
WorkingSetMember* member = ws.get(id);
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index e69c0e9d18b..7eb05f0efcc 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -57,20 +57,20 @@ using stdx::make_unique;
class QueryStageMergeSortTestBase {
public:
- QueryStageMergeSortTestBase() : _client(&_txn) {}
+ QueryStageMergeSortTestBase() : _client(&_opCtx) {}
virtual ~QueryStageMergeSortTestBase() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
_client.dropCollection(ns());
}
void addIndex(const BSONObj& obj) {
- ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), obj));
}
IndexDescriptor* getIndex(const BSONObj& obj, Collection* coll) {
std::vector<IndexDescriptor*> indexes;
- coll->getIndexCatalog()->findIndexesByKeyPattern(&_txn, obj, false, &indexes);
+ coll->getIndexCatalog()->findIndexesByKeyPattern(&_opCtx, obj, false, &indexes);
return indexes.empty() ? nullptr : indexes[0];
}
@@ -83,7 +83,7 @@ public:
}
void getRecordIds(set<RecordId>* out, Collection* coll) {
- auto cursor = coll->getCursor(&_txn);
+ auto cursor = coll->getCursor(&_opCtx);
while (auto record = cursor->next()) {
out->insert(record->id);
}
@@ -109,7 +109,7 @@ public:
protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
private:
DBDirectClient _client;
@@ -120,12 +120,12 @@ private:
class QueryStageMergeSortPrefixIndex : public QueryStageMergeSortTestBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -146,7 +146,7 @@ public:
// Sort by c:1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1);
- MergeSortStage* ms = new MergeSortStage(&_txn, msparams, ws.get(), coll);
+ MergeSortStage* ms = new MergeSortStage(&_opCtx, msparams, ws.get(), coll);
// a:1
IndexScanParams params;
@@ -156,17 +156,17 @@ public:
params.bounds.endKey = objWithMaxKey(1);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
// b:1
params.descriptor = getIndex(secondIndex, coll);
- ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_txn, ws.get(), ms, nullptr, coll);
+ make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
// Must fetch if we want to easily pull out an obj.
auto statusWithPlanExecutor = PlanExecutor::make(
- &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
ASSERT_OK(statusWithPlanExecutor.getStatus());
unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
@@ -191,12 +191,12 @@ public:
class QueryStageMergeSortDups : public QueryStageMergeSortTestBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -217,7 +217,7 @@ public:
// Sort by c:1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1);
- MergeSortStage* ms = new MergeSortStage(&_txn, msparams, ws.get(), coll);
+ MergeSortStage* ms = new MergeSortStage(&_opCtx, msparams, ws.get(), coll);
// a:1
IndexScanParams params;
@@ -227,16 +227,16 @@ public:
params.bounds.endKey = objWithMaxKey(1);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
// b:1
params.descriptor = getIndex(secondIndex, coll);
- ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_txn, ws.get(), ms, nullptr, coll);
+ make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
- &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
ASSERT_OK(statusWithPlanExecutor.getStatus());
unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
@@ -261,12 +261,12 @@ public:
class QueryStageMergeSortDupsNoDedup : public QueryStageMergeSortTestBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -287,7 +287,7 @@ public:
MergeSortStageParams msparams;
msparams.dedup = false;
msparams.pattern = BSON("c" << 1);
- MergeSortStage* ms = new MergeSortStage(&_txn, msparams, ws.get(), coll);
+ MergeSortStage* ms = new MergeSortStage(&_opCtx, msparams, ws.get(), coll);
// a:1
IndexScanParams params;
@@ -297,16 +297,16 @@ public:
params.bounds.endKey = objWithMaxKey(1);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
// b:1
params.descriptor = getIndex(secondIndex, coll);
- ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_txn, ws.get(), ms, nullptr, coll);
+ make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
- &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
ASSERT_OK(statusWithPlanExecutor.getStatus());
unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
@@ -332,12 +332,12 @@ public:
class QueryStageMergeSortPrefixIndexReverse : public QueryStageMergeSortTestBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -359,7 +359,7 @@ public:
// Sort by c:-1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << -1);
- MergeSortStage* ms = new MergeSortStage(&_txn, msparams, ws.get(), coll);
+ MergeSortStage* ms = new MergeSortStage(&_opCtx, msparams, ws.get(), coll);
// a:1
IndexScanParams params;
@@ -370,16 +370,16 @@ public:
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
// This is the direction along the index.
params.direction = 1;
- ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
// b:1
params.descriptor = getIndex(secondIndex, coll);
- ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_txn, ws.get(), ms, nullptr, coll);
+ make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
- &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
ASSERT_OK(statusWithPlanExecutor.getStatus());
unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
@@ -404,12 +404,12 @@ public:
class QueryStageMergeSortOneStageEOF : public QueryStageMergeSortTestBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -430,7 +430,7 @@ public:
// Sort by c:1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1);
- MergeSortStage* ms = new MergeSortStage(&_txn, msparams, ws.get(), coll);
+ MergeSortStage* ms = new MergeSortStage(&_opCtx, msparams, ws.get(), coll);
// a:1
IndexScanParams params;
@@ -440,18 +440,18 @@ public:
params.bounds.endKey = objWithMaxKey(1);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
// b:51 (EOF)
params.descriptor = getIndex(secondIndex, coll);
params.bounds.startKey = BSON("" << 51 << "" << MinKey);
params.bounds.endKey = BSON("" << 51 << "" << MaxKey);
- ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_txn, ws.get(), ms, nullptr, coll);
+ make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
- &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
ASSERT_OK(statusWithPlanExecutor.getStatus());
unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
@@ -473,12 +473,12 @@ public:
class QueryStageMergeSortManyShort : public QueryStageMergeSortTestBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -486,7 +486,7 @@ public:
// Sort by foo:1
MergeSortStageParams msparams;
msparams.pattern = BSON("foo" << 1);
- MergeSortStage* ms = new MergeSortStage(&_txn, msparams, ws.get(), coll);
+ MergeSortStage* ms = new MergeSortStage(&_opCtx, msparams, ws.get(), coll);
IndexScanParams params;
params.bounds.isSimpleRange = true;
@@ -504,13 +504,13 @@ public:
BSONObj indexSpec = BSON(index << 1 << "foo" << 1);
addIndex(indexSpec);
params.descriptor = getIndex(indexSpec, coll);
- ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
}
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_txn, ws.get(), ms, nullptr, coll);
+ make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
- &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
ASSERT_OK(statusWithPlanExecutor.getStatus());
unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
@@ -532,12 +532,12 @@ public:
class QueryStageMergeSortInvalidation : public QueryStageMergeSortTestBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -545,7 +545,7 @@ public:
// Sort by foo:1
MergeSortStageParams msparams;
msparams.pattern = BSON("foo" << 1);
- auto ms = make_unique<MergeSortStage>(&_txn, msparams, &ws, coll);
+ auto ms = make_unique<MergeSortStage>(&_opCtx, msparams, &ws, coll);
IndexScanParams params;
params.bounds.isSimpleRange = true;
@@ -565,7 +565,7 @@ public:
BSONObj indexSpec = BSON(index << 1 << "foo" << 1);
addIndex(indexSpec);
params.descriptor = getIndex(indexSpec, coll);
- ms->addChild(new IndexScan(&_txn, params, &ws, NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
}
set<RecordId> recordIds;
@@ -596,7 +596,7 @@ public:
// Invalidate recordIds[11]. Should force a fetch and return the deleted document.
ms->saveState();
- ms->invalidate(&_txn, *it, INVALIDATION_DELETION);
+ ms->invalidate(&_opCtx, *it, INVALIDATION_DELETION);
ms->restoreState();
// Make sure recordIds[11] was fetched for us.
@@ -648,12 +648,12 @@ public:
class QueryStageMergeSortInvalidationMutationDedup : public QueryStageMergeSortTestBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -672,7 +672,7 @@ public:
WorkingSetMember* member;
MergeSortStageParams msparams;
msparams.pattern = BSON("a" << 1);
- auto ms = stdx::make_unique<MergeSortStage>(&_txn, msparams, &ws, coll);
+ auto ms = stdx::make_unique<MergeSortStage>(&_opCtx, msparams, &ws, coll);
// First child scans [5, 10].
{
@@ -684,7 +684,7 @@ public:
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
auto fetchStage = stdx::make_unique<FetchStage>(
- &_txn, &ws, new IndexScan(&_txn, params, &ws, nullptr), nullptr, coll);
+ &_opCtx, &ws, new IndexScan(&_opCtx, params, &ws, nullptr), nullptr, coll);
ms->addChild(fetchStage.release());
}
@@ -698,7 +698,7 @@ public:
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
auto fetchStage = stdx::make_unique<FetchStage>(
- &_txn, &ws, new IndexScan(&_txn, params, &ws, nullptr), nullptr, coll);
+ &_opCtx, &ws, new IndexScan(&_opCtx, params, &ws, nullptr), nullptr, coll);
ms->addChild(fetchStage.release());
}
@@ -710,7 +710,7 @@ public:
++it;
// Doc {a: 5} gets invalidated by an update.
- ms->invalidate(&_txn, *it, INVALIDATION_MUTATION);
+ ms->invalidate(&_opCtx, *it, INVALIDATION_MUTATION);
// Invalidated doc {a: 5} should still get returned.
member = getNextResult(&ws, ms.get());
@@ -745,12 +745,12 @@ private:
class QueryStageMergeSortStringsWithNullCollation : public QueryStageMergeSortTestBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -774,7 +774,7 @@ public:
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1 << "d" << 1);
msparams.collator = nullptr;
- MergeSortStage* ms = new MergeSortStage(&_txn, msparams, ws.get(), coll);
+ MergeSortStage* ms = new MergeSortStage(&_opCtx, msparams, ws.get(), coll);
// a:1
IndexScanParams params;
@@ -784,17 +784,17 @@ public:
params.bounds.endKey = objWithMaxKey(1);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
// b:1
params.descriptor = getIndex(secondIndex, coll);
- ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_txn, ws.get(), ms, nullptr, coll);
+ make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
// Must fetch if we want to easily pull out an obj.
auto statusWithPlanExecutor = PlanExecutor::make(
- &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
ASSERT_OK(statusWithPlanExecutor.getStatus());
unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
@@ -818,12 +818,12 @@ public:
class QueryStageMergeSortStringsRespectsCollation : public QueryStageMergeSortTestBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -848,7 +848,7 @@ public:
msparams.pattern = BSON("c" << 1 << "d" << 1);
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
msparams.collator = &collator;
- MergeSortStage* ms = new MergeSortStage(&_txn, msparams, ws.get(), coll);
+ MergeSortStage* ms = new MergeSortStage(&_opCtx, msparams, ws.get(), coll);
// a:1
IndexScanParams params;
@@ -858,17 +858,17 @@ public:
params.bounds.endKey = objWithMaxKey(1);
params.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
params.direction = 1;
- ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
// b:1
params.descriptor = getIndex(secondIndex, coll);
- ms->addChild(new IndexScan(&_txn, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_txn, ws.get(), ms, nullptr, coll);
+ make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
// Must fetch if we want to easily pull out an obj.
auto statusWithPlanExecutor = PlanExecutor::make(
- &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
ASSERT_OK(statusWithPlanExecutor.getStatus());
unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
diff --git a/src/mongo/dbtests/query_stage_multiplan.cpp b/src/mongo/dbtests/query_stage_multiplan.cpp
index 906e0e97d1c..3df262ce725 100644
--- a/src/mongo/dbtests/query_stage_multiplan.cpp
+++ b/src/mongo/dbtests/query_stage_multiplan.cpp
@@ -84,38 +84,38 @@ QuerySolution* createQuerySolution() {
class QueryStageMultiPlanBase {
public:
- QueryStageMultiPlanBase() : _client(&_txn) {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ QueryStageMultiPlanBase() : _client(&_opCtx) {
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
_client.dropCollection(nss.ns());
}
virtual ~QueryStageMultiPlanBase() {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
_client.dropCollection(nss.ns());
}
void addIndex(const BSONObj& obj) {
- ASSERT_OK(dbtests::createIndex(&_txn, nss.ns(), obj));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns(), obj));
}
void insert(const BSONObj& obj) {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
_client.insert(nss.ns(), obj);
}
void remove(const BSONObj& obj) {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
_client.remove(nss.ns(), obj);
}
- OperationContext* txn() {
- return &_txn;
+ OperationContext* opCtx() {
+ return &_opCtx;
}
protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
- ClockSource* const _clock = _txn.getServiceContext()->getFastClockSource();
+ OperationContext& _opCtx = *_txnPtr;
+ ClockSource* const _clock = _opCtx.getServiceContext()->getFastClockSource();
DBDirectClient _client;
};
@@ -133,14 +133,15 @@ public:
addIndex(BSON("foo" << 1));
- AutoGetCollectionForRead ctx(&_txn, nss);
+ AutoGetCollectionForRead ctx(&_opCtx, nss);
const Collection* coll = ctx.getCollection();
// Plan 0: IXScan over foo == 7
// Every call to work() returns something so this should clearly win (by current scoring
// at least).
std::vector<IndexDescriptor*> indexes;
- coll->getIndexCatalog()->findIndexesByKeyPattern(&_txn, BSON("foo" << 1), false, &indexes);
+ coll->getIndexCatalog()->findIndexesByKeyPattern(
+ &_opCtx, BSON("foo" << 1), false, &indexes);
ASSERT_EQ(indexes.size(), 1U);
IndexScanParams ixparams;
@@ -152,8 +153,8 @@ public:
ixparams.direction = 1;
unique_ptr<WorkingSet> sharedWs(new WorkingSet());
- IndexScan* ix = new IndexScan(&_txn, ixparams, sharedWs.get(), NULL);
- unique_ptr<PlanStage> firstRoot(new FetchStage(&_txn, sharedWs.get(), ix, NULL, coll));
+ IndexScan* ix = new IndexScan(&_opCtx, ixparams, sharedWs.get(), NULL);
+ unique_ptr<PlanStage> firstRoot(new FetchStage(&_opCtx, sharedWs.get(), ix, NULL, coll));
// Plan 1: CollScan with matcher.
CollectionScanParams csparams;
@@ -169,19 +170,19 @@ public:
unique_ptr<MatchExpression> filter = std::move(statusWithMatcher.getValue());
// Make the stage.
unique_ptr<PlanStage> secondRoot(
- new CollectionScan(&_txn, csparams, sharedWs.get(), filter.get()));
+ new CollectionScan(&_opCtx, csparams, sharedWs.get(), filter.get()));
// Hand the plans off to the MPS.
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(BSON("foo" << 7));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
verify(statusWithCQ.isOK());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
verify(NULL != cq.get());
unique_ptr<MultiPlanStage> mps =
- make_unique<MultiPlanStage>(&_txn, ctx.getCollection(), cq.get());
+ make_unique<MultiPlanStage>(&_opCtx, ctx.getCollection(), cq.get());
mps->addPlan(createQuerySolution(), firstRoot.release(), sharedWs.get());
mps->addPlan(createQuerySolution(), secondRoot.release(), sharedWs.get());
@@ -192,7 +193,7 @@ public:
ASSERT_EQUALS(0, mps->bestPlanIdx());
// Takes ownership of arguments other than 'collection'.
- auto statusWithPlanExecutor = PlanExecutor::make(&_txn,
+ auto statusWithPlanExecutor = PlanExecutor::make(&_opCtx,
std::move(sharedWs),
std::move(mps),
std::move(cq),
@@ -226,7 +227,7 @@ public:
addIndex(BSON("a" << 1));
addIndex(BSON("b" << 1));
- AutoGetCollectionForRead ctx(&_txn, nss);
+ AutoGetCollectionForRead ctx(&_opCtx, nss);
Collection* collection = ctx.getCollection();
// Query for both 'a' and 'b' and sort on 'b'.
@@ -234,7 +235,7 @@ public:
qr->setFilter(BSON("a" << 1 << "b" << 1));
qr->setSort(BSON("b" << 1));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
verify(statusWithCQ.isOK());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(NULL != cq.get());
@@ -245,7 +246,7 @@ public:
// Get planner params.
QueryPlannerParams plannerParams;
- fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams);
+ fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams);
// Turn this off otherwise it pops up in some plans.
plannerParams.options &= ~QueryPlannerParams::KEEP_MUTATIONS;
@@ -259,12 +260,12 @@ public:
ASSERT_EQUALS(solutions.size(), 3U);
// Fill out the MultiPlanStage.
- unique_ptr<MultiPlanStage> mps(new MultiPlanStage(&_txn, collection, cq.get()));
+ unique_ptr<MultiPlanStage> mps(new MultiPlanStage(&_opCtx, collection, cq.get()));
unique_ptr<WorkingSet> ws(new WorkingSet());
// Put each solution from the planner into the MPR.
for (size_t i = 0; i < solutions.size(); ++i) {
PlanStage* root;
- ASSERT(StageBuilder::build(&_txn, collection, *cq, *solutions[i], ws.get(), &root));
+ ASSERT(StageBuilder::build(&_opCtx, collection, *cq, *solutions[i], ws.get(), &root));
// Takes ownership of 'solutions[i]' and 'root'.
mps->addPlan(solutions[i], root, ws.get());
}
@@ -323,8 +324,8 @@ public:
const int nDocs = 500;
auto ws = stdx::make_unique<WorkingSet>();
- auto firstPlan = stdx::make_unique<QueuedDataStage>(&_txn, ws.get());
- auto secondPlan = stdx::make_unique<QueuedDataStage>(&_txn, ws.get());
+ auto firstPlan = stdx::make_unique<QueuedDataStage>(&_opCtx, ws.get());
+ auto secondPlan = stdx::make_unique<QueuedDataStage>(&_opCtx, ws.get());
for (int i = 0; i < nDocs; ++i) {
addMember(firstPlan.get(), ws.get(), BSON("x" << 1));
@@ -334,14 +335,14 @@ public:
secondPlan->pushBack(PlanStage::NEED_TIME);
}
- AutoGetCollectionForRead ctx(&_txn, nss);
+ AutoGetCollectionForRead ctx(&_opCtx, nss);
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(BSON("x" << 1));
auto cq = uassertStatusOK(CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
unique_ptr<MultiPlanStage> mps =
- make_unique<MultiPlanStage>(&_txn, ctx.getCollection(), cq.get());
+ make_unique<MultiPlanStage>(&_opCtx, ctx.getCollection(), cq.get());
// Put each plan into the MultiPlanStage. Takes ownership of 'firstPlan' and 'secondPlan'.
auto firstSoln = stdx::make_unique<QuerySolution>();
@@ -350,8 +351,11 @@ public:
mps->addPlan(secondSoln.release(), secondPlan.release(), ws.get());
// Making a PlanExecutor chooses the best plan.
- auto exec = uassertStatusOK(PlanExecutor::make(
- &_txn, std::move(ws), std::move(mps), ctx.getCollection(), PlanExecutor::YIELD_MANUAL));
+ auto exec = uassertStatusOK(PlanExecutor::make(&_opCtx,
+ std::move(ws),
+ std::move(mps),
+ ctx.getCollection(),
+ PlanExecutor::YIELD_MANUAL));
auto root = static_cast<MultiPlanStage*>(exec->getRootStage());
ASSERT_TRUE(root->bestPlanChosen());
@@ -409,16 +413,16 @@ public:
addIndex(BSON("foo" << 1));
addIndex(BSON("foo" << -1 << "bar" << 1));
- AutoGetCollectionForRead ctx(&_txn, nss);
+ AutoGetCollectionForRead ctx(&_opCtx, nss);
Collection* coll = ctx.getCollection();
// Create the executor (Matching all documents).
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(BSON("foo" << BSON("$gte" << 0)));
auto cq = uassertStatusOK(CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
auto exec =
- uassertStatusOK(getExecutor(&_txn, coll, std::move(cq), PlanExecutor::YIELD_MANUAL));
+ uassertStatusOK(getExecutor(&_opCtx, coll, std::move(cq), PlanExecutor::YIELD_MANUAL));
ASSERT_EQ(exec->getRootStage()->stageType(), STAGE_MULTI_PLAN);
diff --git a/src/mongo/dbtests/query_stage_near.cpp b/src/mongo/dbtests/query_stage_near.cpp
index d973779e7ed..8b3c02fb540 100644
--- a/src/mongo/dbtests/query_stage_near.cpp
+++ b/src/mongo/dbtests/query_stage_near.cpp
@@ -78,7 +78,7 @@ public:
_intervals.mutableVector().push_back(new MockInterval(data, min, max));
}
- virtual StatusWith<CoveredInterval*> nextInterval(OperationContext* txn,
+ virtual StatusWith<CoveredInterval*> nextInterval(OperationContext* opCtx,
WorkingSet* workingSet,
Collection* collection) {
if (_pos == static_cast<int>(_intervals.size()))
@@ -88,7 +88,7 @@ public:
bool lastInterval = _pos == static_cast<int>(_intervals.vector().size());
- auto queuedStage = make_unique<QueuedDataStage>(txn, workingSet);
+ auto queuedStage = make_unique<QueuedDataStage>(opCtx, workingSet);
for (unsigned int i = 0; i < interval.data.size(); i++) {
// Add all documents from the lastInterval into the QueuedDataStage.
@@ -109,7 +109,7 @@ public:
return StatusWith<double>(member->obj.value()["distance"].numberDouble());
}
- virtual StageState initialize(OperationContext* txn,
+ virtual StageState initialize(OperationContext* opCtx,
WorkingSet* workingSet,
Collection* collection,
WorkingSetID* out) {
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index 0d0184ea47a..f5ffa69d1e7 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -58,7 +58,7 @@ namespace dps = ::mongo::dotted_path_support;
class QueryStageSortTestBase {
public:
- QueryStageSortTestBase() : _client(&_txn) {}
+ QueryStageSortTestBase() : _client(&_opCtx) {}
void fillData() {
for (int i = 0; i < numObj(); ++i) {
@@ -75,7 +75,7 @@ public:
}
void getRecordIds(set<RecordId>* out, Collection* coll) {
- auto cursor = coll->getCursor(&_txn);
+ auto cursor = coll->getCursor(&_opCtx);
while (auto record = cursor->next()) {
out->insert(record->id);
}
@@ -97,7 +97,7 @@ public:
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
member->recordId = *it;
- member->obj = coll->docFor(&_txn, *it);
+ member->obj = coll->docFor(&_opCtx, *it);
ws->transitionToRecordIdAndObj(id);
ms->pushBack(id);
}
@@ -110,7 +110,7 @@ public:
PlanExecutor* makePlanExecutorWithSortStage(Collection* coll) {
// Build the mock scan stage which feeds the data.
auto ws = make_unique<WorkingSet>();
- auto queuedDataStage = make_unique<QueuedDataStage>(&_txn, ws.get());
+ auto queuedDataStage = make_unique<QueuedDataStage>(&_opCtx, ws.get());
insertVarietyOfObjects(ws.get(), queuedDataStage.get(), coll);
SortStageParams params;
@@ -119,14 +119,14 @@ public:
params.limit = limit();
auto keyGenStage = make_unique<SortKeyGeneratorStage>(
- &_txn, queuedDataStage.release(), ws.get(), params.pattern, BSONObj(), nullptr);
+ &_opCtx, queuedDataStage.release(), ws.get(), params.pattern, BSONObj(), nullptr);
- auto ss = make_unique<SortStage>(&_txn, params, ws.get(), keyGenStage.release());
+ auto ss = make_unique<SortStage>(&_opCtx, params, ws.get(), keyGenStage.release());
// The PlanExecutor will be automatically registered on construction due to the auto
// yield policy, so it can receive invalidations when we remove documents later.
- auto statusWithPlanExecutor =
- PlanExecutor::make(&_txn, std::move(ws), std::move(ss), coll, PlanExecutor::YIELD_AUTO);
+ auto statusWithPlanExecutor = PlanExecutor::make(
+ &_opCtx, std::move(ws), std::move(ss), coll, PlanExecutor::YIELD_AUTO);
invariant(statusWithPlanExecutor.isOK());
return statusWithPlanExecutor.getValue().release();
}
@@ -147,7 +147,7 @@ public:
*/
void sortAndCheck(int direction, Collection* coll) {
auto ws = make_unique<WorkingSet>();
- auto queuedDataStage = make_unique<QueuedDataStage>(&_txn, ws.get());
+ auto queuedDataStage = make_unique<QueuedDataStage>(&_opCtx, ws.get());
// Insert a mix of the various types of data.
insertVarietyOfObjects(ws.get(), queuedDataStage.get(), coll);
@@ -158,16 +158,16 @@ public:
params.limit = limit();
auto keyGenStage = make_unique<SortKeyGeneratorStage>(
- &_txn, queuedDataStage.release(), ws.get(), params.pattern, BSONObj(), nullptr);
+ &_opCtx, queuedDataStage.release(), ws.get(), params.pattern, BSONObj(), nullptr);
- auto sortStage = make_unique<SortStage>(&_txn, params, ws.get(), keyGenStage.release());
+ auto sortStage = make_unique<SortStage>(&_opCtx, params, ws.get(), keyGenStage.release());
auto fetchStage =
- make_unique<FetchStage>(&_txn, ws.get(), sortStage.release(), nullptr, coll);
+ make_unique<FetchStage>(&_opCtx, ws.get(), sortStage.release(), nullptr, coll);
// Must fetch so we can look at the doc as a BSONObj.
auto statusWithPlanExecutor = PlanExecutor::make(
- &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
ASSERT_OK(statusWithPlanExecutor.getStatus());
unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
@@ -222,7 +222,7 @@ public:
protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
DBDirectClient _client;
};
@@ -235,12 +235,12 @@ public:
}
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -257,12 +257,12 @@ public:
}
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -288,12 +288,12 @@ public:
}
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -313,12 +313,12 @@ public:
}
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -347,7 +347,7 @@ public:
// Since it's in the WorkingSet, the updates should not be reflected in the output.
exec->saveState();
set<RecordId>::iterator it = recordIds.begin();
- Snapshotted<BSONObj> oldDoc = coll->docFor(&_txn, *it);
+ Snapshotted<BSONObj> oldDoc = coll->docFor(&_opCtx, *it);
OID updatedId = oldDoc.value().getField("_id").OID();
SnapshotId idBeforeUpdate = oldDoc.snapshotId();
@@ -358,8 +358,8 @@ public:
OplogUpdateEntryArgs args;
args.ns = coll->ns().ns();
{
- WriteUnitOfWork wuow(&_txn);
- coll->updateDocument(&_txn, *it, oldDoc, newDoc, false, false, NULL, &args);
+ WriteUnitOfWork wuow(&_opCtx);
+ coll->updateDocument(&_opCtx, *it, oldDoc, newDoc, false, false, NULL, &args);
wuow.commit();
}
exec->restoreState();
@@ -374,10 +374,10 @@ public:
// should be fetched.
exec->saveState();
while (it != recordIds.end()) {
- oldDoc = coll->docFor(&_txn, *it);
+ oldDoc = coll->docFor(&_opCtx, *it);
{
- WriteUnitOfWork wuow(&_txn);
- coll->updateDocument(&_txn, *it++, oldDoc, newDoc, false, false, NULL, &args);
+ WriteUnitOfWork wuow(&_opCtx);
+ coll->updateDocument(&_opCtx, *it++, oldDoc, newDoc, false, false, NULL, &args);
wuow.commit();
}
}
@@ -422,12 +422,12 @@ public:
}
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
@@ -457,8 +457,8 @@ public:
OpDebug* const nullOpDebug = nullptr;
set<RecordId>::iterator it = recordIds.begin();
{
- WriteUnitOfWork wuow(&_txn);
- coll->deleteDocument(&_txn, *it++, nullOpDebug);
+ WriteUnitOfWork wuow(&_opCtx);
+ coll->deleteDocument(&_opCtx, *it++, nullOpDebug);
wuow.commit();
}
exec->restoreState();
@@ -473,8 +473,8 @@ public:
exec->saveState();
while (it != recordIds.end()) {
{
- WriteUnitOfWork wuow(&_txn);
- coll->deleteDocument(&_txn, *it++, nullOpDebug);
+ WriteUnitOfWork wuow(&_opCtx);
+ coll->deleteDocument(&_opCtx, *it++, nullOpDebug);
wuow.commit();
}
}
@@ -521,17 +521,17 @@ public:
}
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- WriteUnitOfWork wuow(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wuow(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wuow.commit();
}
auto ws = make_unique<WorkingSet>();
- auto queuedDataStage = make_unique<QueuedDataStage>(&_txn, ws.get());
+ auto queuedDataStage = make_unique<QueuedDataStage>(&_opCtx, ws.get());
for (int i = 0; i < numObj(); ++i) {
{
@@ -557,16 +557,16 @@ public:
params.limit = 0;
auto keyGenStage = make_unique<SortKeyGeneratorStage>(
- &_txn, queuedDataStage.release(), ws.get(), params.pattern, BSONObj(), nullptr);
+ &_opCtx, queuedDataStage.release(), ws.get(), params.pattern, BSONObj(), nullptr);
- auto sortStage = make_unique<SortStage>(&_txn, params, ws.get(), keyGenStage.release());
+ auto sortStage = make_unique<SortStage>(&_opCtx, params, ws.get(), keyGenStage.release());
auto fetchStage =
- make_unique<FetchStage>(&_txn, ws.get(), sortStage.release(), nullptr, coll);
+ make_unique<FetchStage>(&_opCtx, ws.get(), sortStage.release(), nullptr, coll);
// We don't get results back since we're sorting some parallel arrays.
auto statusWithPlanExecutor = PlanExecutor::make(
- &_txn, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::YIELD_MANUAL);
unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
PlanExecutor::ExecState runnerState = exec->getNext(NULL, NULL);
diff --git a/src/mongo/dbtests/query_stage_subplan.cpp b/src/mongo/dbtests/query_stage_subplan.cpp
index 53c839d0fd4..b89df74ac83 100644
--- a/src/mongo/dbtests/query_stage_subplan.cpp
+++ b/src/mongo/dbtests/query_stage_subplan.cpp
@@ -50,23 +50,23 @@ static const NamespaceString nss("unittests.QueryStageSubplan");
class QueryStageSubplanBase {
public:
- QueryStageSubplanBase() : _client(&_txn) {}
+ QueryStageSubplanBase() : _client(&_opCtx) {}
virtual ~QueryStageSubplanBase() {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
_client.dropCollection(nss.ns());
}
void addIndex(const BSONObj& obj) {
- ASSERT_OK(dbtests::createIndex(&_txn, nss.ns(), obj));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns(), obj));
}
void insert(const BSONObj& doc) {
_client.insert(nss.ns(), doc);
}
- OperationContext* txn() {
- return &_txn;
+ OperationContext* opCtx() {
+ return &_opCtx;
}
protected:
@@ -80,13 +80,13 @@ protected:
auto qr = unittest::assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain));
auto cq = unittest::assertGet(
- CanonicalQuery::canonicalize(txn(), std::move(qr), ExtensionsCallbackNoop()));
+ CanonicalQuery::canonicalize(opCtx(), std::move(qr), ExtensionsCallbackNoop()));
return cq;
}
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
- ClockSource* _clock = _txn.getServiceContext()->getFastClockSource();
+ OperationContext& _opCtx = *_txnPtr;
+ ClockSource* _clock = _opCtx.getServiceContext()->getFastClockSource();
private:
DBDirectClient _client;
@@ -101,7 +101,7 @@ private:
class QueryStageSubplanGeo2dOr : public QueryStageSubplanBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
addIndex(BSON("a"
<< "2d"
<< "b"
@@ -116,7 +116,7 @@ public:
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(query);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -124,11 +124,11 @@ public:
// Get planner params.
QueryPlannerParams plannerParams;
- fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams);
+ fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams);
WorkingSet ws;
std::unique_ptr<SubplanStage> subplan(
- new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get()));
+ new SubplanStage(&_opCtx, collection, &ws, plannerParams, cq.get()));
// Plan selection should succeed due to falling back on regular planning.
PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, _clock);
@@ -142,7 +142,7 @@ public:
class QueryStageSubplanPlanFromCache : public QueryStageSubplanBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
addIndex(BSON("a" << 1));
addIndex(BSON("a" << 1 << "b" << 1));
@@ -162,17 +162,17 @@ public:
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(query);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
// Get planner params.
QueryPlannerParams plannerParams;
- fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams);
+ fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams);
WorkingSet ws;
std::unique_ptr<SubplanStage> subplan(
- new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get()));
+ new SubplanStage(&_opCtx, collection, &ws, plannerParams, cq.get()));
PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, _clock);
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
@@ -185,7 +185,7 @@ public:
// If we repeat the same query, the plan for the first branch should have come from
// the cache.
ws.clear();
- subplan.reset(new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get()));
+ subplan.reset(new SubplanStage(&_opCtx, collection, &ws, plannerParams, cq.get()));
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
@@ -200,7 +200,7 @@ public:
class QueryStageSubplanDontCacheZeroResults : public QueryStageSubplanBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
addIndex(BSON("a" << 1 << "b" << 1));
addIndex(BSON("a" << 1));
@@ -220,17 +220,17 @@ public:
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(query);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
// Get planner params.
QueryPlannerParams plannerParams;
- fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams);
+ fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams);
WorkingSet ws;
std::unique_ptr<SubplanStage> subplan(
- new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get()));
+ new SubplanStage(&_opCtx, collection, &ws, plannerParams, cq.get()));
PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, _clock);
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
@@ -244,7 +244,7 @@ public:
// from the cache (because the first call to pickBestPlan() refrained from creating any
// cache entries).
ws.clear();
- subplan.reset(new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get()));
+ subplan.reset(new SubplanStage(&_opCtx, collection, &ws, plannerParams, cq.get()));
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
@@ -259,7 +259,7 @@ public:
class QueryStageSubplanDontCacheTies : public QueryStageSubplanBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
addIndex(BSON("a" << 1 << "b" << 1));
addIndex(BSON("a" << 1 << "c" << 1));
@@ -279,17 +279,17 @@ public:
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(query);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
// Get planner params.
QueryPlannerParams plannerParams;
- fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams);
+ fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams);
WorkingSet ws;
std::unique_ptr<SubplanStage> subplan(
- new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get()));
+ new SubplanStage(&_opCtx, collection, &ws, plannerParams, cq.get()));
PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, _clock);
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
@@ -303,7 +303,7 @@ public:
// from the cache (because the first call to pickBestPlan() refrained from creating any
// cache entries).
ws.clear();
- subplan.reset(new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get()));
+ subplan.reset(new SubplanStage(&_opCtx, collection, &ws, plannerParams, cq.get()));
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
@@ -520,7 +520,7 @@ public:
class QueryStageSubplanPlanContainedOr : public QueryStageSubplanBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
addIndex(BSON("b" << 1 << "a" << 1));
addIndex(BSON("c" << 1 << "a" << 1));
@@ -535,17 +535,17 @@ public:
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(query);
auto cq = unittest::assertGet(CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
Collection* collection = ctx.getCollection();
// Get planner params.
QueryPlannerParams plannerParams;
- fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams);
+ fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams);
WorkingSet ws;
std::unique_ptr<SubplanStage> subplan(
- new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get()));
+ new SubplanStage(&_opCtx, collection, &ws, plannerParams, cq.get()));
// Plan selection should succeed due to falling back on regular planning.
PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, _clock);
@@ -583,7 +583,7 @@ public:
class QueryStageSubplanPlanRootedOrNE : public QueryStageSubplanBase {
public:
void run() {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
addIndex(BSON("a" << 1 << "b" << 1));
addIndex(BSON("a" << 1 << "c" << 1));
@@ -597,16 +597,16 @@ public:
qr->setFilter(fromjson("{$or: [{a: 1}, {a: {$ne:1}}]}"));
qr->setSort(BSON("d" << 1));
auto cq = unittest::assertGet(CanonicalQuery::canonicalize(
- txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
+ opCtx(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
Collection* collection = ctx.getCollection();
QueryPlannerParams plannerParams;
- fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams);
+ fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams);
WorkingSet ws;
std::unique_ptr<SubplanStage> subplan(
- new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get()));
+ new SubplanStage(&_opCtx, collection, &ws, plannerParams, cq.get()));
PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, _clock);
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
diff --git a/src/mongo/dbtests/query_stage_tests.cpp b/src/mongo/dbtests/query_stage_tests.cpp
index 276b9005b3c..55326de7c63 100644
--- a/src/mongo/dbtests/query_stage_tests.cpp
+++ b/src/mongo/dbtests/query_stage_tests.cpp
@@ -54,8 +54,8 @@ using std::unique_ptr;
class IndexScanBase {
public:
- IndexScanBase() : _client(&_txn) {
- OldClientWriteContext ctx(&_txn, ns());
+ IndexScanBase() : _client(&_opCtx) {
+ OldClientWriteContext ctx(&_opCtx, ns());
for (int i = 0; i < numObj(); ++i) {
BSONObjBuilder bob;
@@ -70,16 +70,16 @@ public:
}
virtual ~IndexScanBase() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
_client.dropCollection(ns());
}
void addIndex(const BSONObj& obj) {
- ASSERT_OK(dbtests::createIndex(&_txn, ns(), obj));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), obj));
}
int countResults(const IndexScanParams& params, BSONObj filterObj = BSONObj()) {
- AutoGetCollectionForRead ctx(&_txn, NamespaceString(ns()));
+ AutoGetCollectionForRead ctx(&_opCtx, NamespaceString(ns()));
const CollatorInterface* collator = nullptr;
StatusWithMatchExpression statusWithMatcher = MatchExpressionParser::parse(
@@ -89,10 +89,10 @@ public:
unique_ptr<WorkingSet> ws = stdx::make_unique<WorkingSet>();
unique_ptr<IndexScan> ix =
- stdx::make_unique<IndexScan>(&_txn, params, ws.get(), filterExpr.get());
+ stdx::make_unique<IndexScan>(&_opCtx, params, ws.get(), filterExpr.get());
auto statusWithPlanExecutor = PlanExecutor::make(
- &_txn, std::move(ws), std::move(ix), ctx.getCollection(), PlanExecutor::YIELD_MANUAL);
+ &_opCtx, std::move(ws), std::move(ix), ctx.getCollection(), PlanExecutor::YIELD_MANUAL);
ASSERT_OK(statusWithPlanExecutor.getStatus());
unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());
@@ -107,7 +107,7 @@ public:
}
void makeGeoData() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
for (int i = 0; i < numObj(); ++i) {
double lat = double(rand()) / RAND_MAX;
@@ -117,10 +117,10 @@ public:
}
IndexDescriptor* getIndex(const BSONObj& obj) {
- AutoGetCollectionForRead ctx(&_txn, NamespaceString(ns()));
+ AutoGetCollectionForRead ctx(&_opCtx, NamespaceString(ns()));
Collection* collection = ctx.getCollection();
std::vector<IndexDescriptor*> indexes;
- collection->getIndexCatalog()->findIndexesByKeyPattern(&_txn, obj, false, &indexes);
+ collection->getIndexCatalog()->findIndexesByKeyPattern(&_opCtx, obj, false, &indexes);
return indexes.empty() ? nullptr : indexes[0];
}
@@ -133,7 +133,7 @@ public:
protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
private:
DBDirectClient _client;
diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp
index 06343ca966b..da49550c73a 100644
--- a/src/mongo/dbtests/query_stage_update.cpp
+++ b/src/mongo/dbtests/query_stage_update.cpp
@@ -65,14 +65,14 @@ static const NamespaceString nss("unittests.QueryStageUpdate");
class QueryStageUpdateBase {
public:
- QueryStageUpdateBase() : _client(&_txn) {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ QueryStageUpdateBase() : _client(&_opCtx) {
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
_client.dropCollection(nss.ns());
_client.createCollection(nss.ns());
}
virtual ~QueryStageUpdateBase() {
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
_client.dropCollection(nss.ns());
}
@@ -92,7 +92,7 @@ public:
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(query);
auto statusWithCQ = CanonicalQuery::canonicalize(
- &_txn, std::move(qr), ExtensionsCallbackDisallowExtensions());
+ &_opCtx, std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
@@ -124,7 +124,7 @@ public:
params.direction = CollectionScanParams::FORWARD;
params.tailable = false;
- unique_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
+ unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, &ws, NULL));
while (!scan->isEOF()) {
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = scan->work(&id);
@@ -146,7 +146,7 @@ public:
params.direction = direction;
params.tailable = false;
- unique_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
+ unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, &ws, NULL));
while (!scan->isEOF()) {
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = scan->work(&id);
@@ -174,7 +174,7 @@ public:
protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
private:
DBDirectClient _client;
@@ -188,8 +188,8 @@ public:
void run() {
// Run the update.
{
- OldClientWriteContext ctx(&_txn, nss.ns());
- CurOp& curOp = *CurOp::get(_txn);
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
+ CurOp& curOp = *CurOp::get(_opCtx);
OpDebug* opDebug = &curOp.debug();
UpdateDriver driver((UpdateDriver::Options()));
Collection* collection = ctx.getCollection();
@@ -217,17 +217,17 @@ public:
params.canonicalQuery = cq.get();
auto ws = make_unique<WorkingSet>();
- auto eofStage = make_unique<EOFStage>(&_txn);
+ auto eofStage = make_unique<EOFStage>(&_opCtx);
auto updateStage =
- make_unique<UpdateStage>(&_txn, params, ws.get(), collection, eofStage.release());
+ make_unique<UpdateStage>(&_opCtx, params, ws.get(), collection, eofStage.release());
runUpdate(updateStage.get());
}
// Verify the contents of the resulting collection.
{
- AutoGetCollectionForRead ctx(&_txn, nss);
+ AutoGetCollectionForRead ctx(&_opCtx, nss);
Collection* collection = ctx.getCollection();
vector<BSONObj> objs;
@@ -249,7 +249,7 @@ public:
void run() {
// Run the update.
{
- OldClientWriteContext ctx(&_txn, nss.ns());
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
// Populate the collection.
for (int i = 0; i < 10; ++i) {
@@ -257,7 +257,7 @@ public:
}
ASSERT_EQUALS(10U, count(BSONObj()));
- CurOp& curOp = *CurOp::get(_txn);
+ CurOp& curOp = *CurOp::get(_opCtx);
OpDebug* opDebug = &curOp.debug();
UpdateDriver driver((UpdateDriver::Options()));
Database* db = ctx.db();
@@ -294,10 +294,10 @@ public:
updateParams.canonicalQuery = cq.get();
auto ws = make_unique<WorkingSet>();
- auto cs = make_unique<CollectionScan>(&_txn, collScanParams, ws.get(), cq->root());
+ auto cs = make_unique<CollectionScan>(&_opCtx, collScanParams, ws.get(), cq->root());
auto updateStage =
- make_unique<UpdateStage>(&_txn, updateParams, ws.get(), coll, cs.release());
+ make_unique<UpdateStage>(&_opCtx, updateParams, ws.get(), coll, cs.release());
const UpdateStats* stats =
static_cast<const UpdateStats*>(updateStage->getSpecificStats());
@@ -313,11 +313,11 @@ public:
// Remove recordIds[targetDocIndex];
updateStage->saveState();
{
- WriteUnitOfWork wunit(&_txn);
- updateStage->invalidate(&_txn, recordIds[targetDocIndex], INVALIDATION_DELETION);
+ WriteUnitOfWork wunit(&_opCtx);
+ updateStage->invalidate(&_opCtx, recordIds[targetDocIndex], INVALIDATION_DELETION);
wunit.commit();
}
- BSONObj targetDoc = coll->docFor(&_txn, recordIds[targetDocIndex]).value();
+ BSONObj targetDoc = coll->docFor(&_opCtx, recordIds[targetDocIndex]).value();
ASSERT(!targetDoc.isEmpty());
remove(targetDoc);
updateStage->restoreState();
@@ -336,7 +336,7 @@ public:
// Check the contents of the collection.
{
- AutoGetCollectionForRead ctx(&_txn, nss);
+ AutoGetCollectionForRead ctx(&_opCtx, nss);
Collection* collection = ctx.getCollection();
vector<BSONObj> objs;
@@ -370,8 +370,8 @@ public:
ASSERT_EQUALS(10U, count(BSONObj()));
// Various variables we'll need.
- OldClientWriteContext ctx(&_txn, nss.ns());
- OpDebug* opDebug = &CurOp::get(_txn)->debug();
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
+ OpDebug* opDebug = &CurOp::get(_opCtx)->debug();
Collection* coll = ctx.getCollection();
UpdateLifecycleImpl updateLifecycle(nss);
UpdateRequest request(nss);
@@ -397,7 +397,7 @@ public:
// Configure a QueuedDataStage to pass the first object in the collection back in a
// RID_AND_OBJ state.
- auto qds = make_unique<QueuedDataStage>(&_txn, ws.get());
+ auto qds = make_unique<QueuedDataStage>(&_opCtx, ws.get());
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
member->recordId = recordIds[targetDocIndex];
@@ -411,7 +411,7 @@ public:
updateParams.canonicalQuery = cq.get();
const auto updateStage =
- make_unique<UpdateStage>(&_txn, updateParams, ws.get(), coll, qds.release());
+ make_unique<UpdateStage>(&_opCtx, updateParams, ws.get(), coll, qds.release());
// Should return advanced.
id = WorkingSet::INVALID_ID;
@@ -458,8 +458,8 @@ public:
ASSERT_EQUALS(50U, count(BSONObj()));
// Various variables we'll need.
- OldClientWriteContext ctx(&_txn, nss.ns());
- OpDebug* opDebug = &CurOp::get(_txn)->debug();
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
+ OpDebug* opDebug = &CurOp::get(_opCtx)->debug();
Collection* coll = ctx.getCollection();
UpdateLifecycleImpl updateLifecycle(nss);
UpdateRequest request(nss);
@@ -485,7 +485,7 @@ public:
// Configure a QueuedDataStage to pass the first object in the collection back in a
// RID_AND_OBJ state.
- auto qds = make_unique<QueuedDataStage>(&_txn, ws.get());
+ auto qds = make_unique<QueuedDataStage>(&_opCtx, ws.get());
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
member->recordId = recordIds[targetDocIndex];
@@ -499,7 +499,7 @@ public:
updateParams.canonicalQuery = cq.get();
auto updateStage =
- make_unique<UpdateStage>(&_txn, updateParams, ws.get(), coll, qds.release());
+ make_unique<UpdateStage>(&_opCtx, updateParams, ws.get(), coll, qds.release());
// Should return advanced.
id = WorkingSet::INVALID_ID;
@@ -542,8 +542,8 @@ class QueryStageUpdateSkipOwnedObjects : public QueryStageUpdateBase {
public:
void run() {
// Various variables we'll need.
- OldClientWriteContext ctx(&_txn, nss.ns());
- OpDebug* opDebug = &CurOp::get(_txn)->debug();
+ OldClientWriteContext ctx(&_opCtx, nss.ns());
+ OpDebug* opDebug = &CurOp::get(_opCtx)->debug();
Collection* coll = ctx.getCollection();
UpdateLifecycleImpl updateLifecycle(nss);
UpdateRequest request(nss);
@@ -562,7 +562,7 @@ public:
ASSERT_OK(driver.parse(request.getUpdates(), request.isMulti()));
// Configure a QueuedDataStage to pass an OWNED_OBJ to the update stage.
- auto qds = make_unique<QueuedDataStage>(&_txn, ws.get());
+ auto qds = make_unique<QueuedDataStage>(&_opCtx, ws.get());
{
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
@@ -576,7 +576,7 @@ public:
updateParams.canonicalQuery = cq.get();
const auto updateStage =
- make_unique<UpdateStage>(&_txn, updateParams, ws.get(), coll, qds.release());
+ make_unique<UpdateStage>(&_opCtx, updateParams, ws.get(), coll, qds.release());
const UpdateStats* stats = static_cast<const UpdateStats*>(updateStage->getSpecificStats());
// Call work, passing the set up member to the update stage.
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 5288038b1f8..b2b5495573e 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -65,15 +65,15 @@ const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2;
class Base {
public:
- Base() : _scopedXact(&_txn, MODE_X), _lk(_txn.lockState()), _context(&_txn, ns()) {
+ Base() : _scopedXact(&_opCtx, MODE_X), _lk(_opCtx.lockState()), _context(&_opCtx, ns()) {
{
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
_database = _context.db();
_collection = _database->getCollection(ns());
if (_collection) {
- _database->dropCollection(&_txn, ns());
+ _database->dropCollection(&_opCtx, ns());
}
- _collection = _database->createCollection(&_txn, ns());
+ _collection = _database->createCollection(&_opCtx, ns());
wunit.commit();
}
@@ -82,8 +82,8 @@ public:
~Base() {
try {
- WriteUnitOfWork wunit(&_txn);
- uassertStatusOK(_database->dropCollection(&_txn, ns()));
+ WriteUnitOfWork wunit(&_opCtx);
+ uassertStatusOK(_database->dropCollection(&_opCtx, ns()));
wunit.commit();
} catch (...) {
FAIL("Exception while cleaning up collection");
@@ -97,7 +97,7 @@ protected:
void addIndex(const BSONObj& key) {
Helpers::ensureIndex(
- &_txn, _collection, key, kIndexVersion, false, key.firstElementFieldName());
+ &_opCtx, _collection, key, kIndexVersion, false, key.firstElementFieldName());
}
void insert(const char* s) {
@@ -105,7 +105,7 @@ protected:
}
void insert(const BSONObj& o) {
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
OpDebug* const nullOpDebug = nullptr;
if (o["_id"].eoo()) {
BSONObjBuilder b;
@@ -113,16 +113,16 @@ protected:
oid.init();
b.appendOID("_id", &oid);
b.appendElements(o);
- _collection->insertDocument(&_txn, b.obj(), nullOpDebug, false);
+ _collection->insertDocument(&_opCtx, b.obj(), nullOpDebug, false);
} else {
- _collection->insertDocument(&_txn, o, nullOpDebug, false);
+ _collection->insertDocument(&_opCtx, o, nullOpDebug, false);
}
wunit.commit();
}
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
ScopedTransaction _scopedXact;
Lock::GlobalWrite _lk;
OldClientContext _context;
@@ -141,12 +141,13 @@ public:
BSONObj query = fromjson("{$or:[{b:2},{c:3}]}");
BSONObj ret;
// Check findOne() returning object.
- ASSERT(Helpers::findOne(&_txn, _collection, query, ret, true));
+ ASSERT(Helpers::findOne(&_opCtx, _collection, query, ret, true));
ASSERT_EQUALS(string("b"), ret.firstElement().fieldName());
// Cross check with findOne() returning location.
ASSERT_BSONOBJ_EQ(
ret,
- _collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, true)).value());
+ _collection->docFor(&_opCtx, Helpers::findOne(&_opCtx, _collection, query, true))
+ .value());
}
};
@@ -158,25 +159,27 @@ public:
BSONObj ret;
// Check findOne() returning object, allowing unindexed scan.
- ASSERT(Helpers::findOne(&_txn, _collection, query, ret, false));
+ ASSERT(Helpers::findOne(&_opCtx, _collection, query, ret, false));
// Check findOne() returning location, allowing unindexed scan.
ASSERT_BSONOBJ_EQ(
ret,
- _collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, false)).value());
+ _collection->docFor(&_opCtx, Helpers::findOne(&_opCtx, _collection, query, false))
+ .value());
// Check findOne() returning object, requiring indexed scan without index.
- ASSERT_THROWS(Helpers::findOne(&_txn, _collection, query, ret, true),
+ ASSERT_THROWS(Helpers::findOne(&_opCtx, _collection, query, ret, true),
MsgAssertionException);
// Check findOne() returning location, requiring indexed scan without index.
- ASSERT_THROWS(Helpers::findOne(&_txn, _collection, query, true), MsgAssertionException);
+ ASSERT_THROWS(Helpers::findOne(&_opCtx, _collection, query, true), MsgAssertionException);
addIndex(BSON("b" << 1));
// Check findOne() returning object, requiring indexed scan with index.
- ASSERT(Helpers::findOne(&_txn, _collection, query, ret, true));
+ ASSERT(Helpers::findOne(&_opCtx, _collection, query, ret, true));
// Check findOne() returning location, requiring indexed scan with index.
ASSERT_BSONOBJ_EQ(
ret,
- _collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, true)).value());
+ _collection->docFor(&_opCtx, Helpers::findOne(&_opCtx, _collection, query, true))
+ .value());
}
};
@@ -185,23 +188,23 @@ public:
void run() {
// We don't normally allow empty objects in the database, but test that we can find
// an empty object (one might be allowed inside a reserved namespace at some point).
- ScopedTransaction transaction(&_txn, MODE_X);
- Lock::GlobalWrite lk(_txn.lockState());
- OldClientContext ctx(&_txn, "unittests.querytests");
+ ScopedTransaction transaction(&_opCtx, MODE_X);
+ Lock::GlobalWrite lk(_opCtx.lockState());
+ OldClientContext ctx(&_opCtx, "unittests.querytests");
{
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
Database* db = ctx.db();
if (db->getCollection(ns())) {
_collection = NULL;
- db->dropCollection(&_txn, ns());
+ db->dropCollection(&_opCtx, ns());
}
- _collection = db->createCollection(&_txn, ns(), CollectionOptions(), false);
+ _collection = db->createCollection(&_opCtx, ns(), CollectionOptions(), false);
wunit.commit();
}
ASSERT(_collection);
- DBDirectClient cl(&_txn);
+ DBDirectClient cl(&_opCtx);
BSONObj info;
bool ok = cl.runCommand("unittests",
BSON("godinsert"
@@ -214,21 +217,22 @@ public:
insert(BSONObj());
BSONObj query;
BSONObj ret;
- ASSERT(Helpers::findOne(&_txn, _collection, query, ret, false));
+ ASSERT(Helpers::findOne(&_opCtx, _collection, query, ret, false));
ASSERT(ret.isEmpty());
ASSERT_BSONOBJ_EQ(
ret,
- _collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, false)).value());
+ _collection->docFor(&_opCtx, Helpers::findOne(&_opCtx, _collection, query, false))
+ .value());
}
};
class ClientBase {
public:
- ClientBase() : _client(&_txn) {
- mongo::LastError::get(_txn.getClient()).reset();
+ ClientBase() : _client(&_opCtx) {
+ mongo::LastError::get(_opCtx.getClient()).reset();
}
virtual ~ClientBase() {
- mongo::LastError::get(_txn.getClient()).reset();
+ mongo::LastError::get(_opCtx.getClient()).reset();
}
protected:
@@ -243,7 +247,7 @@ protected:
}
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
DBDirectClient _client;
};
@@ -259,7 +263,7 @@ public:
a.appendMaxKey("$lt");
BSONObj limit = a.done();
ASSERT(!_client.findOne(ns, QUERY("a" << limit)).isEmpty());
- ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1)));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1)));
ASSERT(!_client.findOne(ns, QUERY("a" << limit).hint(BSON("a" << 1))).isEmpty());
}
};
@@ -281,7 +285,7 @@ public:
{
// Check internal server handoff to getmore.
- OldClientWriteContext ctx(&_txn, ns);
+ OldClientWriteContext ctx(&_opCtx, ns);
auto pinnedCursor =
unittest::assertGet(ctx.getCollection()->getCursorManager()->pinCursor(cursorId));
ASSERT_EQUALS(2, pinnedCursor.getCursor()->pos());
@@ -332,11 +336,11 @@ public:
// Check that the cursor has been removed.
{
- AutoGetCollectionForRead ctx(&_txn, NamespaceString(ns));
+ AutoGetCollectionForRead ctx(&_opCtx, NamespaceString(ns));
ASSERT(0 == ctx.getCollection()->getCursorManager()->numCursors());
}
- ASSERT_FALSE(CursorManager::eraseCursorGlobal(&_txn, cursorId));
+ ASSERT_FALSE(CursorManager::eraseCursorGlobal(&_opCtx, cursorId));
// Check that a subsequent get more fails with the cursor removed.
ASSERT_THROWS(_client.getMore(ns, cursorId), UserException);
@@ -380,7 +384,7 @@ public:
// Check that the cursor still exists
{
- AutoGetCollectionForRead ctx(&_txn, NamespaceString(ns));
+ AutoGetCollectionForRead ctx(&_opCtx, NamespaceString(ns));
ASSERT(1 == ctx.getCollection()->getCursorManager()->numCursors());
ASSERT_OK(ctx.getCollection()->getCursorManager()->pinCursor(cursorId).getStatus());
}
@@ -657,9 +661,9 @@ public:
_client.dropCollection(ns);
_client.createCollection(ns, 10, true);
- ScopedTransaction transaction(&_txn, MODE_IX);
- Lock::DBLock lk(_txn.lockState(), "unittests", MODE_X);
- OldClientContext ctx(&_txn, ns);
+ ScopedTransaction transaction(&_opCtx, MODE_IX);
+ Lock::DBLock lk(_opCtx.lockState(), "unittests", MODE_X);
+ OldClientContext ctx(&_opCtx, ns);
BSONObj info;
_client.runCommand("unittests",
@@ -672,11 +676,11 @@ public:
info);
Date_t one = Date_t::fromMillisSinceEpoch(
- LogicalClock::get(&_txn)->reserveTicks(1).asTimestamp().asLL());
+ LogicalClock::get(&_opCtx)->reserveTicks(1).asTimestamp().asLL());
Date_t two = Date_t::fromMillisSinceEpoch(
- LogicalClock::get(&_txn)->reserveTicks(1).asTimestamp().asLL());
+ LogicalClock::get(&_opCtx)->reserveTicks(1).asTimestamp().asLL());
Date_t three = Date_t::fromMillisSinceEpoch(
- LogicalClock::get(&_txn)->reserveTicks(1).asTimestamp().asLL());
+ LogicalClock::get(&_opCtx)->reserveTicks(1).asTimestamp().asLL());
insert(ns, BSON("ts" << one));
insert(ns, BSON("ts" << two));
insert(ns, BSON("ts" << three));
@@ -739,7 +743,7 @@ public:
}
void run() {
const char* ns = "unittests.querytests.BasicCount";
- ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1)));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1)));
count(0);
insert(ns, BSON("a" << 3));
count(0);
@@ -764,7 +768,7 @@ public:
}
void run() {
const char* ns = "unittests.querytests.ArrayId";
- ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("_id" << 1)));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("_id" << 1)));
ASSERT(!error());
_client.insert(ns, fromjson("{'_id':[1,2]}"));
ASSERT(error());
@@ -835,7 +839,7 @@ public:
const char* ns = "unittests.querytests.NumericEmbedded";
_client.insert(ns, BSON("a" << BSON("b" << 1)));
ASSERT(!_client.findOne(ns, BSON("a" << BSON("b" << 1.0))).isEmpty());
- ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1)));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1)));
ASSERT(!_client.findOne(ns, BSON("a" << BSON("b" << 1.0))).isEmpty());
}
};
@@ -855,7 +859,7 @@ public:
ASSERT_EQUALS(0u, _client.getIndexSpecs(ns()).size());
}
void checkIndex() {
- ASSERT_OK(dbtests::createIndex(&_txn, ns(), BSON("a" << 1)));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), BSON("a" << 1)));
index();
}
void run() {
@@ -878,12 +882,12 @@ public:
}
void run() {
const char* ns = "unittests.querytests.UniqueIndex";
- ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1), true));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1), true));
_client.insert(ns, BSON("a" << 4 << "b" << 2));
_client.insert(ns, BSON("a" << 4 << "b" << 3));
ASSERT_EQUALS(1U, _client.count(ns, BSONObj()));
_client.dropCollection(ns);
- ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("b" << 1), true));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("b" << 1), true));
_client.insert(ns, BSON("a" << 4 << "b" << 2));
_client.insert(ns, BSON("a" << 4 << "b" << 3));
ASSERT_EQUALS(2U, _client.count(ns, BSONObj()));
@@ -900,7 +904,7 @@ public:
_client.insert(ns, BSON("a" << 4 << "b" << 2));
_client.insert(ns, BSON("a" << 4 << "b" << 3));
ASSERT_EQUALS(ErrorCodes::DuplicateKey,
- dbtests::createIndex(&_txn, ns, BSON("a" << 1), true));
+ dbtests::createIndex(&_opCtx, ns, BSON("a" << 1), true));
ASSERT_EQUALS(
0U,
_client.count("unittests.system.indexes", BSON("ns" << ns << "name" << NE << "_id_")));
@@ -928,7 +932,7 @@ public:
void run() {
const char* ns = "unittests.querytests.Size";
_client.insert(ns, fromjson("{a:[1,2,3]}"));
- ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1)));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1)));
ASSERT(_client.query(ns, QUERY("a" << mongo::BSIZE << 3).hint(BSON("a" << 1)))->more());
}
};
@@ -942,7 +946,7 @@ public:
const char* ns = "unittests.querytests.IndexedArray";
_client.insert(ns, fromjson("{a:[1,2,3]}"));
ASSERT(_client.query(ns, Query("{a:[1,2,3]}"))->more());
- ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1)));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1)));
ASSERT(_client.query(ns, Query("{a:{$in:[1,[1,2,3]]}}").hint(BSON("a" << 1)))->more());
ASSERT(_client.query(ns, Query("{a:[1,2,3]}").hint(BSON("a" << 1)))->more()); // SERVER-146
}
@@ -957,7 +961,7 @@ public:
const char* ns = "unittests.querytests.InsideArray";
_client.insert(ns, fromjson("{a:[[1],2]}"));
check("$natural");
- ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1)));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1)));
check("a"); // SERVER-146
}
@@ -980,7 +984,7 @@ public:
const char* ns = "unittests.querytests.IndexInsideArrayCorrect";
_client.insert(ns, fromjson("{'_id':1,a:[1]}"));
_client.insert(ns, fromjson("{'_id':2,a:[[1]]}"));
- ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1)));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1)));
ASSERT_EQUALS(
1, _client.query(ns, Query("{a:[1]}").hint(BSON("a" << 1)))->next().getIntField("_id"));
}
@@ -995,7 +999,7 @@ public:
const char* ns = "unittests.querytests.SubobjArr";
_client.insert(ns, fromjson("{a:[{b:[1]}]}"));
check("$natural");
- ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1)));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1)));
check("a");
}
@@ -1014,7 +1018,7 @@ public:
_client.dropCollection("unittests.querytests.MinMax");
}
void run() {
- ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("a" << 1 << "b" << 1)));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1 << "b" << 1)));
_client.insert(ns, BSON("a" << 1 << "b" << 1));
_client.insert(ns, BSON("a" << 1 << "b" << 2));
_client.insert(ns, BSON("a" << 2 << "b" << 1));
@@ -1072,7 +1076,7 @@ public:
}
void run() {
checkMatch();
- ASSERT_OK(dbtests::createIndex(&_txn, _ns, BSON("a" << 1)));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, _ns, BSON("a" << 1)));
checkMatch();
}
@@ -1110,7 +1114,7 @@ public:
}
void run() {
checkMatch();
- ASSERT_OK(dbtests::createIndex(&_txn, _ns, BSON("a" << 1)));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, _ns, BSON("a" << 1)));
checkMatch();
}
@@ -1133,9 +1137,9 @@ private:
class DirectLocking : public ClientBase {
public:
void run() {
- ScopedTransaction transaction(&_txn, MODE_X);
- Lock::GlobalWrite lk(_txn.lockState());
- OldClientContext ctx(&_txn, "unittests.DirectLocking");
+ ScopedTransaction transaction(&_opCtx, MODE_X);
+ Lock::GlobalWrite lk(_opCtx.lockState());
+ OldClientContext ctx(&_opCtx, "unittests.DirectLocking");
_client.remove("a.b", BSONObj());
ASSERT_EQUALS("unittests", ctx.db()->name());
}
@@ -1152,7 +1156,7 @@ public:
_client.insert(ns,
BSON("i"
<< "a"));
- ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("i" << 1)));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("i" << 1)));
ASSERT_EQUALS(1U, _client.count(ns, fromjson("{i:{$in:['a']}}")));
}
};
@@ -1226,7 +1230,7 @@ public:
}
t(ns);
- ASSERT_OK(dbtests::createIndex(&_txn, ns, BSON("7" << 1)));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("7" << 1)));
t(ns);
}
};
@@ -1248,7 +1252,7 @@ public:
}
size_t numCursorsOpen() {
- AutoGetCollectionForRead ctx(&_txn, NamespaceString(_ns));
+ AutoGetCollectionForRead ctx(&_opCtx, NamespaceString(_ns));
Collection* collection = ctx.getCollection();
if (!collection)
return 0;
@@ -1286,7 +1290,7 @@ public:
BSON("x"
<< "eliot"))["z"]
.number());
- ASSERT_OK(dbtests::createIndex(&_txn, ns(), BSON("x" << 1)));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), BSON("x" << 1)));
ASSERT_EQUALS(17,
_client
.findOne(ns(),
@@ -1304,13 +1308,13 @@ public:
}
void run() {
string err;
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
// note that extents are always at least 4KB now - so this will get rounded up
// a bit.
{
- WriteUnitOfWork wunit(&_txn);
- ASSERT(userCreateNS(&_txn,
+ WriteUnitOfWork wunit(&_opCtx);
+ ASSERT(userCreateNS(&_opCtx,
ctx.db(),
ns(),
fromjson("{ capped : true, size : 2000, max: 10000 }"),
@@ -1368,7 +1372,7 @@ public:
HelperTest() : CollectionBase("helpertest") {}
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
for (int i = 0; i < 50; i++) {
insert(ns(), BSON("_id" << i << "x" << i * 2));
@@ -1377,13 +1381,13 @@ public:
ASSERT_EQUALS(50, count());
BSONObj res;
- ASSERT(Helpers::findOne(&_txn, ctx.getCollection(), BSON("_id" << 20), res, true));
+ ASSERT(Helpers::findOne(&_opCtx, ctx.getCollection(), BSON("_id" << 20), res, true));
ASSERT_EQUALS(40, res["x"].numberInt());
- ASSERT(Helpers::findById(&_txn, ctx.db(), ns(), BSON("_id" << 20), res));
+ ASSERT(Helpers::findById(&_opCtx, ctx.db(), ns(), BSON("_id" << 20), res));
ASSERT_EQUALS(40, res["x"].numberInt());
- ASSERT(!Helpers::findById(&_txn, ctx.db(), ns(), BSON("_id" << 200), res));
+ ASSERT(!Helpers::findById(&_opCtx, ctx.db(), ns(), BSON("_id" << 200), res));
long long slow;
long long fast;
@@ -1393,14 +1397,15 @@ public:
{
Timer t;
for (int i = 0; i < n; i++) {
- ASSERT(Helpers::findOne(&_txn, ctx.getCollection(), BSON("_id" << 20), res, true));
+ ASSERT(
+ Helpers::findOne(&_opCtx, ctx.getCollection(), BSON("_id" << 20), res, true));
}
slow = t.micros();
}
{
Timer t;
for (int i = 0; i < n; i++) {
- ASSERT(Helpers::findById(&_txn, ctx.db(), ns(), BSON("_id" << 20), res));
+ ASSERT(Helpers::findById(&_opCtx, ctx.db(), ns(), BSON("_id" << 20), res));
}
fast = t.micros();
}
@@ -1414,7 +1419,7 @@ public:
HelperByIdTest() : CollectionBase("helpertestbyid") {}
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
for (int i = 0; i < 1000; i++) {
insert(ns(), BSON("_id" << i << "x" << i * 2));
@@ -1425,7 +1430,7 @@ public:
BSONObj res;
for (int i = 0; i < 1000; i++) {
- bool found = Helpers::findById(&_txn, ctx.db(), ns(), BSON("_id" << i), res);
+ bool found = Helpers::findById(&_opCtx, ctx.db(), ns(), BSON("_id" << i), res);
ASSERT_EQUALS(i % 2, int(found));
}
}
@@ -1435,7 +1440,7 @@ class ClientCursorTest : public CollectionBase {
ClientCursorTest() : CollectionBase("clientcursortest") {}
void run() {
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
for (int i = 0; i < 1000; i++) {
insert(ns(), BSON("_id" << i << "x" << i * 2));
@@ -1595,9 +1600,9 @@ class CollectionInternalBase : public CollectionBase {
public:
CollectionInternalBase(const char* nsLeaf)
: CollectionBase(nsLeaf),
- _scopedXact(&_txn, MODE_IX),
- _lk(_txn.lockState(), "unittests", MODE_X),
- _ctx(&_txn, ns()) {}
+ _scopedXact(&_opCtx, MODE_IX),
+ _lk(_opCtx.lockState(), "unittests", MODE_X),
+ _ctx(&_opCtx, ns()) {}
private:
ScopedTransaction _scopedXact;
@@ -1631,7 +1636,7 @@ public:
DbMessage dbMessage(message);
QueryMessage queryMessage(dbMessage);
Message result;
- string exhaust = runQuery(&_txn, queryMessage, NamespaceString(ns()), result);
+ string exhaust = runQuery(&_opCtx, queryMessage, NamespaceString(ns()), result);
ASSERT(exhaust.size());
ASSERT_EQUALS(string(ns()), exhaust);
}
@@ -1650,7 +1655,7 @@ public:
ClientCursor* clientCursor = 0;
{
- AutoGetCollectionForRead ctx(&_txn, NamespaceString(ns()));
+ AutoGetCollectionForRead ctx(&_opCtx, NamespaceString(ns()));
auto clientCursorPin =
unittest::assertGet(ctx.getCollection()->getCursorManager()->pinCursor(cursorId));
clientCursor = clientCursorPin.getCursor();
@@ -1702,11 +1707,11 @@ public:
long long cursorId = cursor->getCursorId();
{
- OldClientWriteContext ctx(&_txn, ns());
+ OldClientWriteContext ctx(&_opCtx, ns());
auto pinnedCursor = unittest::assertGet(
ctx.db()->getCollection(ns())->getCursorManager()->pinCursor(cursorId));
string expectedAssertion = str::stream() << "Cannot kill pinned cursor: " << cursorId;
- ASSERT_THROWS_WHAT(CursorManager::eraseCursorGlobal(&_txn, cursorId),
+ ASSERT_THROWS_WHAT(CursorManager::eraseCursorGlobal(&_opCtx, cursorId),
MsgAssertionException,
expectedAssertion);
}
diff --git a/src/mongo/dbtests/replica_set_tests.cpp b/src/mongo/dbtests/replica_set_tests.cpp
index f6cb5b11e21..b11f0111be1 100644
--- a/src/mongo/dbtests/replica_set_tests.cpp
+++ b/src/mongo/dbtests/replica_set_tests.cpp
@@ -46,15 +46,15 @@ ServiceContext::UniqueOperationContext makeOpCtx() {
class ReplicaSetTest : public mongo::unittest::Test {
protected:
void setUp() {
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
_storageInterface = stdx::make_unique<repl::StorageInterfaceMock>();
_replCoordExternalState.reset(
new repl::ReplicationCoordinatorExternalStateImpl(_storageInterface.get()));
}
void tearDown() {
- auto txn = makeOpCtx();
- DBDirectClient client(txn.get());
+ auto opCtx = makeOpCtx();
+ DBDirectClient client(opCtx.get());
client.dropCollection("local.replset.election");
_replCoordExternalState.reset();
@@ -75,57 +75,57 @@ private:
};
TEST_F(ReplicaSetTest, ReplCoordExternalStateStoresLastVoteWithNewTerm) {
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
auto replCoordExternalState = getReplCoordExternalState();
- replCoordExternalState->storeLocalLastVoteDocument(txn.get(), repl::LastVote{2, 1});
+ replCoordExternalState->storeLocalLastVoteDocument(opCtx.get(), repl::LastVote{2, 1});
- auto lastVote = replCoordExternalState->loadLocalLastVoteDocument(txn.get());
+ auto lastVote = replCoordExternalState->loadLocalLastVoteDocument(opCtx.get());
ASSERT_OK(lastVote.getStatus());
ASSERT_EQ(lastVote.getValue().getTerm(), 2);
ASSERT_EQ(lastVote.getValue().getCandidateIndex(), 1);
- replCoordExternalState->storeLocalLastVoteDocument(txn.get(), repl::LastVote{3, 1});
+ replCoordExternalState->storeLocalLastVoteDocument(opCtx.get(), repl::LastVote{3, 1});
- lastVote = replCoordExternalState->loadLocalLastVoteDocument(txn.get());
+ lastVote = replCoordExternalState->loadLocalLastVoteDocument(opCtx.get());
ASSERT_OK(lastVote.getStatus());
ASSERT_EQ(lastVote.getValue().getTerm(), 3);
ASSERT_EQ(lastVote.getValue().getCandidateIndex(), 1);
}
TEST_F(ReplicaSetTest, ReplCoordExternalStateDoesNotStoreLastVoteWithOldTerm) {
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
auto replCoordExternalState = getReplCoordExternalState();
- replCoordExternalState->storeLocalLastVoteDocument(txn.get(), repl::LastVote{2, 1});
+ replCoordExternalState->storeLocalLastVoteDocument(opCtx.get(), repl::LastVote{2, 1});
- auto lastVote = replCoordExternalState->loadLocalLastVoteDocument(txn.get());
+ auto lastVote = replCoordExternalState->loadLocalLastVoteDocument(opCtx.get());
ASSERT_OK(lastVote.getStatus());
ASSERT_EQ(lastVote.getValue().getTerm(), 2);
ASSERT_EQ(lastVote.getValue().getCandidateIndex(), 1);
- replCoordExternalState->storeLocalLastVoteDocument(txn.get(), repl::LastVote{1, 1});
+ replCoordExternalState->storeLocalLastVoteDocument(opCtx.get(), repl::LastVote{1, 1});
- lastVote = replCoordExternalState->loadLocalLastVoteDocument(txn.get());
+ lastVote = replCoordExternalState->loadLocalLastVoteDocument(opCtx.get());
ASSERT_OK(lastVote.getStatus());
ASSERT_EQ(lastVote.getValue().getTerm(), 2);
ASSERT_EQ(lastVote.getValue().getCandidateIndex(), 1);
}
TEST_F(ReplicaSetTest, ReplCoordExternalStateDoesNotStoreLastVoteWithEqualTerm) {
- auto txn = makeOpCtx();
+ auto opCtx = makeOpCtx();
auto replCoordExternalState = getReplCoordExternalState();
- replCoordExternalState->storeLocalLastVoteDocument(txn.get(), repl::LastVote{2, 1});
+ replCoordExternalState->storeLocalLastVoteDocument(opCtx.get(), repl::LastVote{2, 1});
- auto lastVote = replCoordExternalState->loadLocalLastVoteDocument(txn.get());
+ auto lastVote = replCoordExternalState->loadLocalLastVoteDocument(opCtx.get());
ASSERT_OK(lastVote.getStatus());
ASSERT_EQ(lastVote.getValue().getTerm(), 2);
ASSERT_EQ(lastVote.getValue().getCandidateIndex(), 1);
- replCoordExternalState->storeLocalLastVoteDocument(txn.get(), repl::LastVote{2, 2});
+ replCoordExternalState->storeLocalLastVoteDocument(opCtx.get(), repl::LastVote{2, 2});
- lastVote = replCoordExternalState->loadLocalLastVoteDocument(txn.get());
+ lastVote = replCoordExternalState->loadLocalLastVoteDocument(opCtx.get());
ASSERT_OK(lastVote.getStatus());
ASSERT_EQ(lastVote.getValue().getTerm(), 2);
ASSERT_EQ(lastVote.getValue().getCandidateIndex(), 1);
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index 8a2a597645f..59eedcd86b7 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -69,36 +69,36 @@ BSONObj f(const char* s) {
class Base {
protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
mutable DBDirectClient _client;
public:
- Base() : _client(&_txn) {
+ Base() : _client(&_opCtx) {
ReplSettings replSettings;
replSettings.setOplogSizeBytes(10 * 1024 * 1024);
replSettings.setMaster(true);
setGlobalReplicationCoordinator(
- new repl::ReplicationCoordinatorMock(_txn.getServiceContext(), replSettings));
+ new repl::ReplicationCoordinatorMock(_opCtx.getServiceContext(), replSettings));
// Since the Client object persists across tests, even though the global
// ReplicationCoordinator does not, we need to clear the last op associated with the client
// to avoid the invariant in ReplClientInfo::setLastOp that the optime only goes forward.
- repl::ReplClientInfo::forClient(_txn.getClient()).clearLastOp_forTest();
+ repl::ReplClientInfo::forClient(_opCtx.getClient()).clearLastOp_forTest();
getGlobalServiceContext()->setOpObserver(stdx::make_unique<OpObserverImpl>());
setOplogCollectionName();
- createOplog(&_txn);
+ createOplog(&_opCtx);
- OldClientWriteContext ctx(&_txn, ns());
- WriteUnitOfWork wuow(&_txn);
+ OldClientWriteContext ctx(&_opCtx, ns());
+ WriteUnitOfWork wuow(&_opCtx);
Collection* c = ctx.db()->getCollection(ns());
if (!c) {
- c = ctx.db()->createCollection(&_txn, ns());
+ c = ctx.db()->createCollection(&_opCtx, ns());
}
- ASSERT(c->getIndexCatalog()->haveIdIndex(&_txn));
+ ASSERT(c->getIndexCatalog()->haveIdIndex(&_opCtx));
wuow.commit();
}
~Base() {
@@ -108,7 +108,7 @@ public:
ReplSettings replSettings;
replSettings.setOplogSizeBytes(10 * 1024 * 1024);
setGlobalReplicationCoordinator(
- new repl::ReplicationCoordinatorMock(_txn.getServiceContext(), replSettings));
+ new repl::ReplicationCoordinatorMock(_opCtx.getServiceContext(), replSettings));
} catch (...) {
FAIL("Exception while cleaning up test");
}
@@ -145,68 +145,68 @@ protected:
return _client.findOne(cllNS(), BSONObj());
}
int count() const {
- ScopedTransaction transaction(&_txn, MODE_X);
- Lock::GlobalWrite lk(_txn.lockState());
- OldClientContext ctx(&_txn, ns());
+ ScopedTransaction transaction(&_opCtx, MODE_X);
+ Lock::GlobalWrite lk(_opCtx.lockState());
+ OldClientContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- WriteUnitOfWork wunit(&_txn);
- coll = db->createCollection(&_txn, ns());
+ WriteUnitOfWork wunit(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns());
wunit.commit();
}
int count = 0;
- auto cursor = coll->getCursor(&_txn);
+ auto cursor = coll->getCursor(&_opCtx);
while (auto record = cursor->next()) {
++count;
}
return count;
}
int opCount() {
- return DBDirectClient(&_txn).query(cllNS(), BSONObj())->itcount();
+ return DBDirectClient(&_opCtx).query(cllNS(), BSONObj())->itcount();
}
void applyAllOperations() {
- ScopedTransaction transaction(&_txn, MODE_X);
- Lock::GlobalWrite lk(_txn.lockState());
+ ScopedTransaction transaction(&_opCtx, MODE_X);
+ Lock::GlobalWrite lk(_opCtx.lockState());
vector<BSONObj> ops;
{
- DBDirectClient db(&_txn);
+ DBDirectClient db(&_opCtx);
auto cursor = db.query(cllNS(), BSONObj());
while (cursor->more()) {
ops.push_back(cursor->nextSafeOwned());
}
}
{
- OldClientContext ctx(&_txn, ns());
+ OldClientContext ctx(&_opCtx, ns());
BSONObjBuilder b;
b.append("host", "localhost");
b.appendTimestamp("syncedTo", 0);
- ReplSource a(&_txn, b.obj());
+ ReplSource a(&_opCtx, b.obj());
for (vector<BSONObj>::iterator i = ops.begin(); i != ops.end(); ++i) {
if (0) {
mongo::unittest::log() << "op: " << *i << endl;
}
- _txn.setReplicatedWrites(false);
- a.applyOperation(&_txn, ctx.db(), *i);
- _txn.setReplicatedWrites(true);
+ _opCtx.setReplicatedWrites(false);
+ a.applyOperation(&_opCtx, ctx.db(), *i);
+ _opCtx.setReplicatedWrites(true);
}
}
}
void printAll(const char* ns) {
- ScopedTransaction transaction(&_txn, MODE_X);
- Lock::GlobalWrite lk(_txn.lockState());
- OldClientContext ctx(&_txn, ns);
+ ScopedTransaction transaction(&_opCtx, MODE_X);
+ Lock::GlobalWrite lk(_opCtx.lockState());
+ OldClientContext ctx(&_opCtx, ns);
Database* db = ctx.db();
Collection* coll = db->getCollection(ns);
if (!coll) {
- WriteUnitOfWork wunit(&_txn);
- coll = db->createCollection(&_txn, ns);
+ WriteUnitOfWork wunit(&_opCtx);
+ coll = db->createCollection(&_opCtx, ns);
wunit.commit();
}
- auto cursor = coll->getCursor(&_txn);
+ auto cursor = coll->getCursor(&_opCtx);
::mongo::log() << "all for " << ns << endl;
while (auto record = cursor->next()) {
::mongo::log() << record->data.releaseToBson() << endl;
@@ -214,35 +214,35 @@ protected:
}
// These deletes don't get logged.
void deleteAll(const char* ns) const {
- ScopedTransaction transaction(&_txn, MODE_X);
- Lock::GlobalWrite lk(_txn.lockState());
- OldClientContext ctx(&_txn, ns);
- WriteUnitOfWork wunit(&_txn);
+ ScopedTransaction transaction(&_opCtx, MODE_X);
+ Lock::GlobalWrite lk(_opCtx.lockState());
+ OldClientContext ctx(&_opCtx, ns);
+ WriteUnitOfWork wunit(&_opCtx);
Database* db = ctx.db();
Collection* coll = db->getCollection(ns);
if (!coll) {
- coll = db->createCollection(&_txn, ns);
+ coll = db->createCollection(&_opCtx, ns);
}
- ASSERT_OK(coll->truncate(&_txn));
+ ASSERT_OK(coll->truncate(&_opCtx));
wunit.commit();
}
void insert(const BSONObj& o) const {
- ScopedTransaction transaction(&_txn, MODE_X);
- Lock::GlobalWrite lk(_txn.lockState());
- OldClientContext ctx(&_txn, ns());
- WriteUnitOfWork wunit(&_txn);
+ ScopedTransaction transaction(&_opCtx, MODE_X);
+ Lock::GlobalWrite lk(_opCtx.lockState());
+ OldClientContext ctx(&_opCtx, ns());
+ WriteUnitOfWork wunit(&_opCtx);
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(&_txn, ns());
+ coll = db->createCollection(&_opCtx, ns());
}
OpDebug* const nullOpDebug = nullptr;
if (o.hasField("_id")) {
- _txn.setReplicatedWrites(false);
- coll->insertDocument(&_txn, o, nullOpDebug, true);
- _txn.setReplicatedWrites(true);
+ _opCtx.setReplicatedWrites(false);
+ coll->insertDocument(&_opCtx, o, nullOpDebug, true);
+ _opCtx.setReplicatedWrites(true);
wunit.commit();
return;
}
@@ -252,9 +252,9 @@ protected:
id.init();
b.appendOID("_id", &id);
b.appendElements(o);
- _txn.setReplicatedWrites(false);
- coll->insertDocument(&_txn, b.obj(), nullOpDebug, true);
- _txn.setReplicatedWrites(true);
+ _opCtx.setReplicatedWrites(false);
+ coll->insertDocument(&_opCtx, b.obj(), nullOpDebug, true);
+ _opCtx.setReplicatedWrites(true);
wunit.commit();
}
static BSONObj wid(const char* json) {
@@ -1240,7 +1240,7 @@ public:
void reset() const {
deleteAll(ns());
// Add an index on 'a'. This prevents the update from running 'in place'.
- ASSERT_OK(dbtests::createIndex(&_txn, ns(), BSON("a" << 1)));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), BSON("a" << 1)));
insert(fromjson("{'_id':0,z:1}"));
}
};
@@ -1383,7 +1383,7 @@ public:
bool returnEmpty;
SyncTest() : SyncTail(nullptr, SyncTail::MultiSyncApplyFunc()), returnEmpty(false) {}
virtual ~SyncTest() {}
- virtual BSONObj getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o) {
+ virtual BSONObj getMissingDoc(OperationContext* opCtx, Database* db, const BSONObj& o) {
if (returnEmpty) {
BSONObj o;
return o;
@@ -1407,16 +1407,16 @@ public:
<< "foo"
<< "bar"));
- ScopedTransaction transaction(&_txn, MODE_X);
- Lock::GlobalWrite lk(_txn.lockState());
+ ScopedTransaction transaction(&_opCtx, MODE_X);
+ Lock::GlobalWrite lk(_opCtx.lockState());
// this should fail because we can't connect
try {
SyncTail badSource(nullptr, SyncTail::MultiSyncApplyFunc());
badSource.setHostname("localhost:123");
- OldClientContext ctx(&_txn, ns());
- badSource.getMissingDoc(&_txn, ctx.db(), o);
+ OldClientContext ctx(&_opCtx, ns());
+ badSource.getMissingDoc(&_opCtx, ctx.db(), o);
} catch (DBException&) {
threw = true;
}
@@ -1424,7 +1424,7 @@ public:
// now this should succeed
SyncTest t;
- verify(t.shouldRetry(&_txn, o));
+ verify(t.shouldRetry(&_opCtx, o));
verify(!_client
.findOne(ns(),
BSON("_id"
@@ -1433,7 +1433,7 @@ public:
// force it not to find an obj
t.returnEmpty = true;
- verify(!t.shouldRetry(&_txn, o));
+ verify(!t.shouldRetry(&_opCtx, o));
}
};
diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp
index b842eb5a9bc..27982080d36 100644
--- a/src/mongo/dbtests/rollbacktests.cpp
+++ b/src/mongo/dbtests/rollbacktests.cpp
@@ -50,13 +50,13 @@ namespace RollbackTests {
namespace {
const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2;
-void dropDatabase(OperationContext* txn, const NamespaceString& nss) {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite globalWriteLock(txn->lockState());
- Database* db = dbHolder().get(txn, nss.db());
+void dropDatabase(OperationContext* opCtx, const NamespaceString& nss) {
+ ScopedTransaction transaction(opCtx, MODE_X);
+ Lock::GlobalWrite globalWriteLock(opCtx->lockState());
+ Database* db = dbHolder().get(opCtx, nss.db());
if (db) {
- Database::dropDatabase(txn, db);
+ Database::dropDatabase(opCtx, db);
}
}
bool collectionExists(OldClientContext* ctx, const string& ns) {
@@ -65,38 +65,38 @@ bool collectionExists(OldClientContext* ctx, const string& ns) {
dbEntry->getCollectionNamespaces(&names);
return std::find(names.begin(), names.end(), ns) != names.end();
}
-void createCollection(OperationContext* txn, const NamespaceString& nss) {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), nss.db(), MODE_X);
- OldClientContext ctx(txn, nss.ns());
+void createCollection(OperationContext* opCtx, const NamespaceString& nss) {
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ Lock::DBLock dbXLock(opCtx->lockState(), nss.db(), MODE_X);
+ OldClientContext ctx(opCtx, nss.ns());
{
- WriteUnitOfWork uow(txn);
+ WriteUnitOfWork uow(opCtx);
ASSERT(!collectionExists(&ctx, nss.ns()));
- ASSERT_OK(userCreateNS(txn, ctx.db(), nss.ns(), BSONObj(), false));
+ ASSERT_OK(userCreateNS(opCtx, ctx.db(), nss.ns(), BSONObj(), false));
ASSERT(collectionExists(&ctx, nss.ns()));
uow.commit();
}
}
-Status renameCollection(OperationContext* txn,
+Status renameCollection(OperationContext* opCtx,
const NamespaceString& source,
const NamespaceString& target) {
ASSERT_EQ(source.db(), target.db());
- Database* db = dbHolder().get(txn, source.db());
- return db->renameCollection(txn, source.ns(), target.ns(), false);
+ Database* db = dbHolder().get(opCtx, source.db());
+ return db->renameCollection(opCtx, source.ns(), target.ns(), false);
}
-Status truncateCollection(OperationContext* txn, const NamespaceString& nss) {
- Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns());
- return coll->truncate(txn);
+Status truncateCollection(OperationContext* opCtx, const NamespaceString& nss) {
+ Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(nss.ns());
+ return coll->truncate(opCtx);
}
-void insertRecord(OperationContext* txn, const NamespaceString& nss, const BSONObj& data) {
- Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns());
+void insertRecord(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& data) {
+ Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(nss.ns());
OpDebug* const nullOpDebug = nullptr;
- ASSERT_OK(coll->insertDocument(txn, data, nullOpDebug, false));
+ ASSERT_OK(coll->insertDocument(opCtx, data, nullOpDebug, false));
}
-void assertOnlyRecord(OperationContext* txn, const NamespaceString& nss, const BSONObj& data) {
- Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns());
- auto cursor = coll->getCursor(txn);
+void assertOnlyRecord(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& data) {
+ Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(nss.ns());
+ auto cursor = coll->getCursor(opCtx);
auto record = cursor->next();
ASSERT(record);
@@ -104,29 +104,29 @@ void assertOnlyRecord(OperationContext* txn, const NamespaceString& nss, const B
ASSERT(!cursor->next());
}
-void assertEmpty(OperationContext* txn, const NamespaceString& nss) {
- Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns());
- ASSERT(!coll->getCursor(txn)->next());
+void assertEmpty(OperationContext* opCtx, const NamespaceString& nss) {
+ Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(nss.ns());
+ ASSERT(!coll->getCursor(opCtx)->next());
}
-bool indexExists(OperationContext* txn, const NamespaceString& nss, const string& idxName) {
- Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns());
- return coll->getIndexCatalog()->findIndexByName(txn, idxName, true) != NULL;
+bool indexExists(OperationContext* opCtx, const NamespaceString& nss, const string& idxName) {
+ Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(nss.ns());
+ return coll->getIndexCatalog()->findIndexByName(opCtx, idxName, true) != NULL;
}
-bool indexReady(OperationContext* txn, const NamespaceString& nss, const string& idxName) {
- Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns());
- return coll->getIndexCatalog()->findIndexByName(txn, idxName, false) != NULL;
+bool indexReady(OperationContext* opCtx, const NamespaceString& nss, const string& idxName) {
+ Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(nss.ns());
+ return coll->getIndexCatalog()->findIndexByName(opCtx, idxName, false) != NULL;
}
-size_t getNumIndexEntries(OperationContext* txn,
+size_t getNumIndexEntries(OperationContext* opCtx,
const NamespaceString& nss,
const string& idxName) {
size_t numEntries = 0;
- Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns());
+ Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(nss.ns());
IndexCatalog* catalog = coll->getIndexCatalog();
- IndexDescriptor* desc = catalog->findIndexByName(txn, idxName, false);
+ IndexDescriptor* desc = catalog->findIndexByName(opCtx, idxName, false);
if (desc) {
- auto cursor = catalog->getIndex(desc)->newCursor(txn);
+ auto cursor = catalog->getIndex(desc)->newCursor(opCtx);
for (auto kv = cursor->seek(kMinBSONKey, true); kv; kv = cursor->next()) {
numEntries++;
@@ -136,11 +136,11 @@ size_t getNumIndexEntries(OperationContext* txn,
return numEntries;
}
-void dropIndex(OperationContext* txn, const NamespaceString& nss, const string& idxName) {
- Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns());
- IndexDescriptor* desc = coll->getIndexCatalog()->findIndexByName(txn, idxName);
+void dropIndex(OperationContext* opCtx, const NamespaceString& nss, const string& idxName) {
+ Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(nss.ns());
+ IndexDescriptor* desc = coll->getIndexCatalog()->findIndexByName(opCtx, idxName);
ASSERT(desc);
- ASSERT_OK(coll->getIndexCatalog()->dropIndex(txn, desc));
+ ASSERT_OK(coll->getIndexCatalog()->dropIndex(opCtx, desc));
}
} // namespace
@@ -149,19 +149,19 @@ class CreateCollection {
public:
void run() {
string ns = "unittests.rollback_create_collection";
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
NamespaceString nss(ns);
- dropDatabase(&txn, nss);
+ dropDatabase(&opCtx, nss);
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock dbXLock(txn.lockState(), nss.db(), MODE_X);
- OldClientContext ctx(&txn, ns);
+ ScopedTransaction transaction(&opCtx, MODE_IX);
+ Lock::DBLock dbXLock(opCtx.lockState(), nss.db(), MODE_X);
+ OldClientContext ctx(&opCtx, ns);
{
- WriteUnitOfWork uow(&txn);
+ WriteUnitOfWork uow(&opCtx);
ASSERT(!collectionExists(&ctx, ns));
auto options = capped ? BSON("capped" << true << "size" << 1000) : BSONObj();
- ASSERT_OK(userCreateNS(&txn, ctx.db(), ns, options, defaultIndexes));
+ ASSERT_OK(userCreateNS(&opCtx, ctx.db(), ns, options, defaultIndexes));
ASSERT(collectionExists(&ctx, ns));
if (!rollback) {
uow.commit();
@@ -180,19 +180,19 @@ class DropCollection {
public:
void run() {
string ns = "unittests.rollback_drop_collection";
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
NamespaceString nss(ns);
- dropDatabase(&txn, nss);
+ dropDatabase(&opCtx, nss);
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock dbXLock(txn.lockState(), nss.db(), MODE_X);
- OldClientContext ctx(&txn, ns);
+ ScopedTransaction transaction(&opCtx, MODE_IX);
+ Lock::DBLock dbXLock(opCtx.lockState(), nss.db(), MODE_X);
+ OldClientContext ctx(&opCtx, ns);
{
- WriteUnitOfWork uow(&txn);
+ WriteUnitOfWork uow(&opCtx);
ASSERT(!collectionExists(&ctx, ns));
auto options = capped ? BSON("capped" << true << "size" << 1000) : BSONObj();
- ASSERT_OK(userCreateNS(&txn, ctx.db(), ns, options, defaultIndexes));
+ ASSERT_OK(userCreateNS(&opCtx, ctx.db(), ns, options, defaultIndexes));
uow.commit();
}
ASSERT(collectionExists(&ctx, ns));
@@ -200,9 +200,9 @@ public:
// END OF SETUP / START OF TEST
{
- WriteUnitOfWork uow(&txn);
+ WriteUnitOfWork uow(&opCtx);
ASSERT(collectionExists(&ctx, ns));
- ASSERT_OK(ctx.db()->dropCollection(&txn, ns));
+ ASSERT_OK(ctx.db()->dropCollection(&opCtx, ns));
ASSERT(!collectionExists(&ctx, ns));
if (!rollback) {
uow.commit();
@@ -222,21 +222,21 @@ public:
void run() {
NamespaceString source("unittests.rollback_rename_collection_src");
NamespaceString target("unittests.rollback_rename_collection_dest");
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
- dropDatabase(&txn, source);
- dropDatabase(&txn, target);
+ dropDatabase(&opCtx, source);
+ dropDatabase(&opCtx, target);
- ScopedTransaction transaction(&txn, MODE_X);
- Lock::GlobalWrite globalWriteLock(txn.lockState());
- OldClientContext ctx(&txn, source.ns());
+ ScopedTransaction transaction(&opCtx, MODE_X);
+ Lock::GlobalWrite globalWriteLock(opCtx.lockState());
+ OldClientContext ctx(&opCtx, source.ns());
{
- WriteUnitOfWork uow(&txn);
+ WriteUnitOfWork uow(&opCtx);
ASSERT(!collectionExists(&ctx, source.ns()));
ASSERT(!collectionExists(&ctx, target.ns()));
- ASSERT_OK(userCreateNS(&txn, ctx.db(), source.ns(), BSONObj(), defaultIndexes));
+ ASSERT_OK(userCreateNS(&opCtx, ctx.db(), source.ns(), BSONObj(), defaultIndexes));
uow.commit();
}
ASSERT(collectionExists(&ctx, source.ns()));
@@ -245,8 +245,8 @@ public:
// END OF SETUP / START OF TEST
{
- WriteUnitOfWork uow(&txn);
- ASSERT_OK(renameCollection(&txn, source, target));
+ WriteUnitOfWork uow(&opCtx);
+ ASSERT_OK(renameCollection(&opCtx, source, target));
ASSERT(!collectionExists(&ctx, source.ns()));
ASSERT(collectionExists(&ctx, target.ns()));
if (!rollback) {
@@ -269,15 +269,15 @@ public:
void run() {
NamespaceString source("unittests.rollback_rename_droptarget_collection_src");
NamespaceString target("unittests.rollback_rename_droptarget_collection_dest");
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
- dropDatabase(&txn, source);
- dropDatabase(&txn, target);
+ dropDatabase(&opCtx, source);
+ dropDatabase(&opCtx, target);
- ScopedTransaction transaction(&txn, MODE_X);
- Lock::GlobalWrite globalWriteLock(txn.lockState());
- OldClientContext ctx(&txn, source.ns());
+ ScopedTransaction transaction(&opCtx, MODE_X);
+ Lock::GlobalWrite globalWriteLock(opCtx.lockState());
+ OldClientContext ctx(&opCtx, source.ns());
BSONObj sourceDoc = BSON("_id"
<< "source");
@@ -285,31 +285,31 @@ public:
<< "target");
{
- WriteUnitOfWork uow(&txn);
+ WriteUnitOfWork uow(&opCtx);
ASSERT(!collectionExists(&ctx, source.ns()));
ASSERT(!collectionExists(&ctx, target.ns()));
- ASSERT_OK(userCreateNS(&txn, ctx.db(), source.ns(), BSONObj(), defaultIndexes));
- ASSERT_OK(userCreateNS(&txn, ctx.db(), target.ns(), BSONObj(), defaultIndexes));
+ ASSERT_OK(userCreateNS(&opCtx, ctx.db(), source.ns(), BSONObj(), defaultIndexes));
+ ASSERT_OK(userCreateNS(&opCtx, ctx.db(), target.ns(), BSONObj(), defaultIndexes));
- insertRecord(&txn, source, sourceDoc);
- insertRecord(&txn, target, targetDoc);
+ insertRecord(&opCtx, source, sourceDoc);
+ insertRecord(&opCtx, target, targetDoc);
uow.commit();
}
ASSERT(collectionExists(&ctx, source.ns()));
ASSERT(collectionExists(&ctx, target.ns()));
- assertOnlyRecord(&txn, source, sourceDoc);
- assertOnlyRecord(&txn, target, targetDoc);
+ assertOnlyRecord(&opCtx, source, sourceDoc);
+ assertOnlyRecord(&opCtx, target, targetDoc);
// END OF SETUP / START OF TEST
{
- WriteUnitOfWork uow(&txn);
- ASSERT_OK(ctx.db()->dropCollection(&txn, target.ns()));
- ASSERT_OK(renameCollection(&txn, source, target));
+ WriteUnitOfWork uow(&opCtx);
+ ASSERT_OK(ctx.db()->dropCollection(&opCtx, target.ns()));
+ ASSERT_OK(renameCollection(&opCtx, source, target));
ASSERT(!collectionExists(&ctx, source.ns()));
ASSERT(collectionExists(&ctx, target.ns()));
- assertOnlyRecord(&txn, target, sourceDoc);
+ assertOnlyRecord(&opCtx, target, sourceDoc);
if (!rollback) {
uow.commit();
}
@@ -317,12 +317,12 @@ public:
if (rollback) {
ASSERT(collectionExists(&ctx, source.ns()));
ASSERT(collectionExists(&ctx, target.ns()));
- assertOnlyRecord(&txn, source, sourceDoc);
- assertOnlyRecord(&txn, target, targetDoc);
+ assertOnlyRecord(&opCtx, source, sourceDoc);
+ assertOnlyRecord(&opCtx, target, targetDoc);
} else {
ASSERT(!collectionExists(&ctx, source.ns()));
ASSERT(collectionExists(&ctx, target.ns()));
- assertOnlyRecord(&txn, target, sourceDoc);
+ assertOnlyRecord(&opCtx, target, sourceDoc);
}
}
};
@@ -332,13 +332,13 @@ class ReplaceCollection {
public:
void run() {
NamespaceString nss("unittests.rollback_replace_collection");
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- dropDatabase(&txn, nss);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ dropDatabase(&opCtx, nss);
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock dbXLock(txn.lockState(), nss.db(), MODE_X);
- OldClientContext ctx(&txn, nss.ns());
+ ScopedTransaction transaction(&opCtx, MODE_IX);
+ Lock::DBLock dbXLock(opCtx.lockState(), nss.db(), MODE_X);
+ OldClientContext ctx(&opCtx, nss.ns());
BSONObj oldDoc = BSON("_id"
<< "old");
@@ -346,34 +346,34 @@ public:
<< "new");
{
- WriteUnitOfWork uow(&txn);
+ WriteUnitOfWork uow(&opCtx);
ASSERT(!collectionExists(&ctx, nss.ns()));
- ASSERT_OK(userCreateNS(&txn, ctx.db(), nss.ns(), BSONObj(), defaultIndexes));
- insertRecord(&txn, nss, oldDoc);
+ ASSERT_OK(userCreateNS(&opCtx, ctx.db(), nss.ns(), BSONObj(), defaultIndexes));
+ insertRecord(&opCtx, nss, oldDoc);
uow.commit();
}
ASSERT(collectionExists(&ctx, nss.ns()));
- assertOnlyRecord(&txn, nss, oldDoc);
+ assertOnlyRecord(&opCtx, nss, oldDoc);
// END OF SETUP / START OF TEST
{
- WriteUnitOfWork uow(&txn);
- ASSERT_OK(ctx.db()->dropCollection(&txn, nss.ns()));
+ WriteUnitOfWork uow(&opCtx);
+ ASSERT_OK(ctx.db()->dropCollection(&opCtx, nss.ns()));
ASSERT(!collectionExists(&ctx, nss.ns()));
- ASSERT_OK(userCreateNS(&txn, ctx.db(), nss.ns(), BSONObj(), defaultIndexes));
+ ASSERT_OK(userCreateNS(&opCtx, ctx.db(), nss.ns(), BSONObj(), defaultIndexes));
ASSERT(collectionExists(&ctx, nss.ns()));
- insertRecord(&txn, nss, newDoc);
- assertOnlyRecord(&txn, nss, newDoc);
+ insertRecord(&opCtx, nss, newDoc);
+ assertOnlyRecord(&opCtx, nss, newDoc);
if (!rollback) {
uow.commit();
}
}
ASSERT(collectionExists(&ctx, nss.ns()));
if (rollback) {
- assertOnlyRecord(&txn, nss, oldDoc);
+ assertOnlyRecord(&opCtx, nss, oldDoc);
} else {
- assertOnlyRecord(&txn, nss, newDoc);
+ assertOnlyRecord(&opCtx, nss, newDoc);
}
}
};
@@ -383,27 +383,27 @@ class CreateDropCollection {
public:
void run() {
NamespaceString nss("unittests.rollback_create_drop_collection");
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- dropDatabase(&txn, nss);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ dropDatabase(&opCtx, nss);
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock dbXLock(txn.lockState(), nss.db(), MODE_X);
- OldClientContext ctx(&txn, nss.ns());
+ ScopedTransaction transaction(&opCtx, MODE_IX);
+ Lock::DBLock dbXLock(opCtx.lockState(), nss.db(), MODE_X);
+ OldClientContext ctx(&opCtx, nss.ns());
BSONObj doc = BSON("_id"
<< "example string");
ASSERT(!collectionExists(&ctx, nss.ns()));
{
- WriteUnitOfWork uow(&txn);
+ WriteUnitOfWork uow(&opCtx);
- ASSERT_OK(userCreateNS(&txn, ctx.db(), nss.ns(), BSONObj(), defaultIndexes));
+ ASSERT_OK(userCreateNS(&opCtx, ctx.db(), nss.ns(), BSONObj(), defaultIndexes));
ASSERT(collectionExists(&ctx, nss.ns()));
- insertRecord(&txn, nss, doc);
- assertOnlyRecord(&txn, nss, doc);
+ insertRecord(&opCtx, nss, doc);
+ assertOnlyRecord(&opCtx, nss, doc);
- ASSERT_OK(ctx.db()->dropCollection(&txn, nss.ns()));
+ ASSERT_OK(ctx.db()->dropCollection(&opCtx, nss.ns()));
ASSERT(!collectionExists(&ctx, nss.ns()));
if (!rollback) {
@@ -419,37 +419,37 @@ class TruncateCollection {
public:
void run() {
NamespaceString nss("unittests.rollback_truncate_collection");
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- dropDatabase(&txn, nss);
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ dropDatabase(&opCtx, nss);
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock dbXLock(txn.lockState(), nss.db(), MODE_X);
- OldClientContext ctx(&txn, nss.ns());
+ ScopedTransaction transaction(&opCtx, MODE_IX);
+ Lock::DBLock dbXLock(opCtx.lockState(), nss.db(), MODE_X);
+ OldClientContext ctx(&opCtx, nss.ns());
BSONObj doc = BSON("_id"
<< "foo");
ASSERT(!collectionExists(&ctx, nss.ns()));
{
- WriteUnitOfWork uow(&txn);
+ WriteUnitOfWork uow(&opCtx);
- ASSERT_OK(userCreateNS(&txn, ctx.db(), nss.ns(), BSONObj(), defaultIndexes));
+ ASSERT_OK(userCreateNS(&opCtx, ctx.db(), nss.ns(), BSONObj(), defaultIndexes));
ASSERT(collectionExists(&ctx, nss.ns()));
- insertRecord(&txn, nss, doc);
- assertOnlyRecord(&txn, nss, doc);
+ insertRecord(&opCtx, nss, doc);
+ assertOnlyRecord(&opCtx, nss, doc);
uow.commit();
}
- assertOnlyRecord(&txn, nss, doc);
+ assertOnlyRecord(&opCtx, nss, doc);
// END OF SETUP / START OF TEST
{
- WriteUnitOfWork uow(&txn);
+ WriteUnitOfWork uow(&opCtx);
- ASSERT_OK(truncateCollection(&txn, nss));
+ ASSERT_OK(truncateCollection(&opCtx, nss));
ASSERT(collectionExists(&ctx, nss.ns()));
- assertEmpty(&txn, nss);
+ assertEmpty(&opCtx, nss);
if (!rollback) {
uow.commit();
@@ -457,9 +457,9 @@ public:
}
ASSERT(collectionExists(&ctx, nss.ns()));
if (rollback) {
- assertOnlyRecord(&txn, nss, doc);
+ assertOnlyRecord(&opCtx, nss, doc);
} else {
- assertEmpty(&txn, nss);
+ assertEmpty(&opCtx, nss);
}
}
};
@@ -469,14 +469,14 @@ class CreateIndex {
public:
void run() {
string ns = "unittests.rollback_create_index";
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
NamespaceString nss(ns);
- dropDatabase(&txn, nss);
- createCollection(&txn, nss);
+ dropDatabase(&opCtx, nss);
+ createCollection(&opCtx, nss);
- ScopedTransaction transaction(&txn, MODE_IX);
- AutoGetDb autoDb(&txn, nss.db(), MODE_X);
+ ScopedTransaction transaction(&opCtx, MODE_IX);
+ AutoGetDb autoDb(&opCtx, nss.db(), MODE_X);
Collection* coll = autoDb.getDb()->getCollection(ns);
IndexCatalog* catalog = coll->getIndexCatalog();
@@ -488,20 +488,20 @@ public:
// END SETUP / START TEST
{
- WriteUnitOfWork uow(&txn);
- ASSERT_OK(catalog->createIndexOnEmptyCollection(&txn, spec));
- insertRecord(&txn, nss, BSON("a" << 1));
- insertRecord(&txn, nss, BSON("a" << 2));
- insertRecord(&txn, nss, BSON("a" << 3));
+ WriteUnitOfWork uow(&opCtx);
+ ASSERT_OK(catalog->createIndexOnEmptyCollection(&opCtx, spec));
+ insertRecord(&opCtx, nss, BSON("a" << 1));
+ insertRecord(&opCtx, nss, BSON("a" << 2));
+ insertRecord(&opCtx, nss, BSON("a" << 3));
if (!rollback) {
uow.commit();
}
}
if (rollback) {
- ASSERT(!indexExists(&txn, nss, idxName));
+ ASSERT(!indexExists(&opCtx, nss, idxName));
} else {
- ASSERT(indexReady(&txn, nss, idxName));
+ ASSERT(indexReady(&opCtx, nss, idxName));
}
}
};
@@ -511,14 +511,14 @@ class DropIndex {
public:
void run() {
string ns = "unittests.rollback_drop_index";
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
NamespaceString nss(ns);
- dropDatabase(&txn, nss);
- createCollection(&txn, nss);
+ dropDatabase(&opCtx, nss);
+ createCollection(&opCtx, nss);
- ScopedTransaction transaction(&txn, MODE_IX);
- AutoGetDb autoDb(&txn, nss.db(), MODE_X);
+ ScopedTransaction transaction(&opCtx, MODE_IX);
+ AutoGetDb autoDb(&opCtx, nss.db(), MODE_X);
Collection* coll = autoDb.getDb()->getCollection(ns);
IndexCatalog* catalog = coll->getIndexCatalog();
@@ -528,34 +528,34 @@ public:
<< static_cast<int>(kIndexVersion));
{
- WriteUnitOfWork uow(&txn);
- ASSERT_OK(catalog->createIndexOnEmptyCollection(&txn, spec));
- insertRecord(&txn, nss, BSON("a" << 1));
- insertRecord(&txn, nss, BSON("a" << 2));
- insertRecord(&txn, nss, BSON("a" << 3));
+ WriteUnitOfWork uow(&opCtx);
+ ASSERT_OK(catalog->createIndexOnEmptyCollection(&opCtx, spec));
+ insertRecord(&opCtx, nss, BSON("a" << 1));
+ insertRecord(&opCtx, nss, BSON("a" << 2));
+ insertRecord(&opCtx, nss, BSON("a" << 3));
uow.commit();
}
- ASSERT(indexReady(&txn, nss, idxName));
- ASSERT_EQ(3u, getNumIndexEntries(&txn, nss, idxName));
+ ASSERT(indexReady(&opCtx, nss, idxName));
+ ASSERT_EQ(3u, getNumIndexEntries(&opCtx, nss, idxName));
// END SETUP / START TEST
{
- WriteUnitOfWork uow(&txn);
+ WriteUnitOfWork uow(&opCtx);
- dropIndex(&txn, nss, idxName);
- ASSERT(!indexExists(&txn, nss, idxName));
+ dropIndex(&opCtx, nss, idxName);
+ ASSERT(!indexExists(&opCtx, nss, idxName));
if (!rollback) {
uow.commit();
}
}
if (rollback) {
- ASSERT(indexExists(&txn, nss, idxName));
- ASSERT(indexReady(&txn, nss, idxName));
- ASSERT_EQ(3u, getNumIndexEntries(&txn, nss, idxName));
+ ASSERT(indexExists(&opCtx, nss, idxName));
+ ASSERT(indexReady(&opCtx, nss, idxName));
+ ASSERT_EQ(3u, getNumIndexEntries(&opCtx, nss, idxName));
} else {
- ASSERT(!indexExists(&txn, nss, idxName));
+ ASSERT(!indexExists(&opCtx, nss, idxName));
}
}
};
@@ -565,14 +565,14 @@ class CreateDropIndex {
public:
void run() {
string ns = "unittests.rollback_create_drop_index";
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
NamespaceString nss(ns);
- dropDatabase(&txn, nss);
- createCollection(&txn, nss);
+ dropDatabase(&opCtx, nss);
+ createCollection(&opCtx, nss);
- ScopedTransaction transaction(&txn, MODE_IX);
- AutoGetDb autoDb(&txn, nss.db(), MODE_X);
+ ScopedTransaction transaction(&opCtx, MODE_IX);
+ AutoGetDb autoDb(&opCtx, nss.db(), MODE_X);
Collection* coll = autoDb.getDb()->getCollection(ns);
IndexCatalog* catalog = coll->getIndexCatalog();
@@ -584,24 +584,24 @@ public:
// END SETUP / START TEST
{
- WriteUnitOfWork uow(&txn);
+ WriteUnitOfWork uow(&opCtx);
- ASSERT_OK(catalog->createIndexOnEmptyCollection(&txn, spec));
- insertRecord(&txn, nss, BSON("a" << 1));
- insertRecord(&txn, nss, BSON("a" << 2));
- insertRecord(&txn, nss, BSON("a" << 3));
- ASSERT(indexExists(&txn, nss, idxName));
- ASSERT_EQ(3u, getNumIndexEntries(&txn, nss, idxName));
+ ASSERT_OK(catalog->createIndexOnEmptyCollection(&opCtx, spec));
+ insertRecord(&opCtx, nss, BSON("a" << 1));
+ insertRecord(&opCtx, nss, BSON("a" << 2));
+ insertRecord(&opCtx, nss, BSON("a" << 3));
+ ASSERT(indexExists(&opCtx, nss, idxName));
+ ASSERT_EQ(3u, getNumIndexEntries(&opCtx, nss, idxName));
- dropIndex(&txn, nss, idxName);
- ASSERT(!indexExists(&txn, nss, idxName));
+ dropIndex(&opCtx, nss, idxName);
+ ASSERT(!indexExists(&opCtx, nss, idxName));
if (!rollback) {
uow.commit();
}
}
- ASSERT(!indexExists(&txn, nss, idxName));
+ ASSERT(!indexExists(&opCtx, nss, idxName));
}
};
@@ -610,14 +610,14 @@ class SetIndexHead {
public:
void run() {
string ns = "unittests.rollback_set_index_head";
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
NamespaceString nss(ns);
- dropDatabase(&txn, nss);
- createCollection(&txn, nss);
+ dropDatabase(&opCtx, nss);
+ createCollection(&opCtx, nss);
- ScopedTransaction transaction(&txn, MODE_IX);
- AutoGetDb autoDb(&txn, nss.db(), MODE_X);
+ ScopedTransaction transaction(&opCtx, MODE_IX);
+ AutoGetDb autoDb(&opCtx, nss.db(), MODE_X);
Collection* coll = autoDb.getDb()->getCollection(ns);
IndexCatalog* catalog = coll->getIndexCatalog();
@@ -627,19 +627,19 @@ public:
<< static_cast<int>(kIndexVersion));
{
- WriteUnitOfWork uow(&txn);
- ASSERT_OK(catalog->createIndexOnEmptyCollection(&txn, spec));
+ WriteUnitOfWork uow(&opCtx);
+ ASSERT_OK(catalog->createIndexOnEmptyCollection(&opCtx, spec));
uow.commit();
}
- IndexDescriptor* indexDesc = catalog->findIndexByName(&txn, idxName);
+ IndexDescriptor* indexDesc = catalog->findIndexByName(&opCtx, idxName);
invariant(indexDesc);
const IndexCatalogEntry* ice = catalog->getEntry(indexDesc);
invariant(ice);
HeadManager* headManager = ice->headManager();
- const RecordId oldHead = headManager->getHead(&txn);
- ASSERT_EQ(oldHead, ice->head(&txn));
+ const RecordId oldHead = headManager->getHead(&opCtx);
+ ASSERT_EQ(oldHead, ice->head(&opCtx));
const RecordId dummyHead(123, 456);
ASSERT_NE(oldHead, dummyHead);
@@ -647,12 +647,12 @@ public:
// END SETUP / START TEST
{
- WriteUnitOfWork uow(&txn);
+ WriteUnitOfWork uow(&opCtx);
- headManager->setHead(&txn, dummyHead);
+ headManager->setHead(&opCtx, dummyHead);
- ASSERT_EQ(ice->head(&txn), dummyHead);
- ASSERT_EQ(headManager->getHead(&txn), dummyHead);
+ ASSERT_EQ(ice->head(&opCtx), dummyHead);
+ ASSERT_EQ(headManager->getHead(&opCtx), dummyHead);
if (!rollback) {
uow.commit();
@@ -660,11 +660,11 @@ public:
}
if (rollback) {
- ASSERT_EQ(ice->head(&txn), oldHead);
- ASSERT_EQ(headManager->getHead(&txn), oldHead);
+ ASSERT_EQ(ice->head(&opCtx), oldHead);
+ ASSERT_EQ(headManager->getHead(&opCtx), oldHead);
} else {
- ASSERT_EQ(ice->head(&txn), dummyHead);
- ASSERT_EQ(headManager->getHead(&txn), dummyHead);
+ ASSERT_EQ(ice->head(&opCtx), dummyHead);
+ ASSERT_EQ(headManager->getHead(&opCtx), dummyHead);
}
}
};
@@ -674,14 +674,14 @@ class CreateCollectionAndIndexes {
public:
void run() {
string ns = "unittests.rollback_create_collection_and_indexes";
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
NamespaceString nss(ns);
- dropDatabase(&txn, nss);
+ dropDatabase(&opCtx, nss);
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock dbXLock(txn.lockState(), nss.db(), MODE_X);
- OldClientContext ctx(&txn, nss.ns());
+ ScopedTransaction transaction(&opCtx, MODE_IX);
+ Lock::DBLock dbXLock(opCtx.lockState(), nss.db(), MODE_X);
+ OldClientContext ctx(&opCtx, nss.ns());
string idxNameA = "indexA";
string idxNameB = "indexB";
@@ -696,16 +696,16 @@ public:
// END SETUP / START TEST
{
- WriteUnitOfWork uow(&txn);
+ WriteUnitOfWork uow(&opCtx);
ASSERT(!collectionExists(&ctx, nss.ns()));
- ASSERT_OK(userCreateNS(&txn, ctx.db(), nss.ns(), BSONObj(), false));
+ ASSERT_OK(userCreateNS(&opCtx, ctx.db(), nss.ns(), BSONObj(), false));
ASSERT(collectionExists(&ctx, nss.ns()));
Collection* coll = ctx.db()->getCollection(ns);
IndexCatalog* catalog = coll->getIndexCatalog();
- ASSERT_OK(catalog->createIndexOnEmptyCollection(&txn, specA));
- ASSERT_OK(catalog->createIndexOnEmptyCollection(&txn, specB));
- ASSERT_OK(catalog->createIndexOnEmptyCollection(&txn, specC));
+ ASSERT_OK(catalog->createIndexOnEmptyCollection(&opCtx, specA));
+ ASSERT_OK(catalog->createIndexOnEmptyCollection(&opCtx, specB));
+ ASSERT_OK(catalog->createIndexOnEmptyCollection(&opCtx, specC));
if (!rollback) {
uow.commit();
@@ -715,9 +715,9 @@ public:
ASSERT(!collectionExists(&ctx, ns));
} else {
ASSERT(collectionExists(&ctx, ns));
- ASSERT(indexReady(&txn, nss, idxNameA));
- ASSERT(indexReady(&txn, nss, idxNameB));
- ASSERT(indexReady(&txn, nss, idxNameC));
+ ASSERT(indexReady(&opCtx, nss, idxNameA));
+ ASSERT(indexReady(&opCtx, nss, idxNameB));
+ ASSERT(indexReady(&opCtx, nss, idxNameC));
}
}
};
diff --git a/src/mongo/dbtests/sort_key_generator_test.cpp b/src/mongo/dbtests/sort_key_generator_test.cpp
index c808a52623a..0f25f214c07 100644
--- a/src/mongo/dbtests/sort_key_generator_test.cpp
+++ b/src/mongo/dbtests/sort_key_generator_test.cpp
@@ -55,7 +55,7 @@ BSONObj extractSortKey(const char* sortSpec,
const char* query,
const CollatorInterface* collator) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
WorkingSetMember wsm;
wsm.obj = Snapshotted<BSONObj>(SnapshotId(), fromjson(doc));
@@ -63,7 +63,7 @@ BSONObj extractSortKey(const char* sortSpec,
BSONObj sortKey;
auto sortKeyGen = stdx::make_unique<SortKeyGenerator>(
- txn.get(), fromjson(sortSpec), fromjson(query), collator);
+ opCtx.get(), fromjson(sortSpec), fromjson(query), collator);
ASSERT_OK(sortKeyGen->getSortKey(wsm, &sortKey));
return sortKey;
@@ -83,7 +83,7 @@ BSONObj extractSortKeyCovered(const char* sortSpec,
const IndexKeyDatum& ikd,
const CollatorInterface* collator) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
WorkingSet ws;
WorkingSetID wsid = ws.allocate();
@@ -93,7 +93,7 @@ BSONObj extractSortKeyCovered(const char* sortSpec,
BSONObj sortKey;
auto sortKeyGen =
- stdx::make_unique<SortKeyGenerator>(txn.get(), fromjson(sortSpec), BSONObj(), collator);
+ stdx::make_unique<SortKeyGenerator>(opCtx.get(), fromjson(sortSpec), BSONObj(), collator);
ASSERT_OK(sortKeyGen->getSortKey(*wsm, &sortKey));
return sortKey;
diff --git a/src/mongo/dbtests/updatetests.cpp b/src/mongo/dbtests/updatetests.cpp
index 92bd635d0af..7b62f5ad4a0 100644
--- a/src/mongo/dbtests/updatetests.cpp
+++ b/src/mongo/dbtests/updatetests.cpp
@@ -56,11 +56,11 @@ namespace dps = ::mongo::dotted_path_support;
class ClientBase {
public:
- ClientBase() : _client(&_txn) {
- mongo::LastError::get(_txn.getClient()).reset();
+ ClientBase() : _client(&_opCtx) {
+ mongo::LastError::get(_opCtx.getClient()).reset();
}
virtual ~ClientBase() {
- mongo::LastError::get(_txn.getClient()).reset();
+ mongo::LastError::get(_opCtx.getClient()).reset();
}
protected:
@@ -75,7 +75,7 @@ protected:
}
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
DBDirectClient _client;
};
@@ -1717,7 +1717,7 @@ public:
class IndexParentOfMod : public SetBase {
public:
void run() {
- ASSERT_OK(dbtests::createIndex(&_txn, ns(), BSON("a" << 1)));
+ ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), BSON("a" << 1)));
_client.insert(ns(), fromjson("{'_id':0}"));
_client.update(ns(), Query(), fromjson("{$set:{'a.b':4}}"));
ASSERT_BSONOBJ_EQ(fromjson("{'_id':0,a:{b:4}}"), _client.findOne(ns(), Query()));
diff --git a/src/mongo/dbtests/validate_tests.cpp b/src/mongo/dbtests/validate_tests.cpp
index 3eb1706fbce..9b949ec3b93 100644
--- a/src/mongo/dbtests/validate_tests.cpp
+++ b/src/mongo/dbtests/validate_tests.cpp
@@ -59,7 +59,7 @@ static const char* const _ns = "unittests.validate_tests";
*/
class ValidateBase {
public:
- explicit ValidateBase(bool full) : _ctx(&_txn, _ns), _client(&_txn), _full(full) {
+ explicit ValidateBase(bool full) : _ctx(&_opCtx, _ns), _client(&_opCtx), _full(full) {
_client.createCollection(_ns);
}
~ValidateBase() {
@@ -75,7 +75,7 @@ protected:
ValidateResults results;
BSONObjBuilder output;
ASSERT_OK(collection()->validate(
- &_txn, _full ? kValidateFull : kValidateIndex, &results, &output));
+ &_opCtx, _full ? kValidateFull : kValidateIndex, &results, &output));
// Check if errors are reported if and only if valid is set to false.
ASSERT_EQ(results.valid, results.errors.empty());
@@ -94,7 +94,7 @@ protected:
}
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _txn = *_txnPtr;
+ OperationContext& _opCtx = *_txnPtr;
OldClientWriteContext _ctx;
DBDirectClient _client;
bool _full;
@@ -112,13 +112,13 @@ public:
RecordId id1;
{
OpDebug* const nullOpDebug = nullptr;
- WriteUnitOfWork wunit(&_txn);
- ASSERT_OK(db->dropCollection(&_txn, _ns));
- coll = db->createCollection(&_txn, _ns);
+ WriteUnitOfWork wunit(&_opCtx);
+ ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+ coll = db->createCollection(&_opCtx, _ns);
- ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1), nullOpDebug, true));
- id1 = coll->getCursor(&_txn)->next()->id;
- ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2), nullOpDebug, true));
+ ASSERT_OK(coll->insertDocument(&_opCtx, BSON("_id" << 1), nullOpDebug, true));
+ id1 = coll->getCursor(&_opCtx)->next()->id;
+ ASSERT_OK(coll->insertDocument(&_opCtx, BSON("_id" << 2), nullOpDebug, true));
wunit.commit();
}
@@ -128,8 +128,8 @@ public:
// Remove {_id: 1} from the record store, so we get more _id entries than records.
{
- WriteUnitOfWork wunit(&_txn);
- rs->deleteRecord(&_txn, id1);
+ WriteUnitOfWork wunit(&_opCtx);
+ rs->deleteRecord(&_opCtx, id1);
wunit.commit();
}
@@ -138,11 +138,11 @@ public:
// Insert records {_id: 0} and {_id: 1} , so we get too few _id entries, and verify
// validate fails.
{
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
for (int j = 0; j < 2; j++) {
auto doc = BSON("_id" << j);
- ASSERT_OK(
- rs->insertRecord(&_txn, doc.objdata(), doc.objsize(), /*enforceQuota*/ false));
+ ASSERT_OK(rs->insertRecord(
+ &_opCtx, doc.objdata(), doc.objsize(), /*enforceQuota*/ false));
}
wunit.commit();
}
@@ -162,16 +162,18 @@ public:
RecordId id1;
{
OpDebug* const nullOpDebug = nullptr;
- WriteUnitOfWork wunit(&_txn);
- ASSERT_OK(db->dropCollection(&_txn, _ns));
- coll = db->createCollection(&_txn, _ns);
- ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
- id1 = coll->getCursor(&_txn)->next()->id;
- ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
+ WriteUnitOfWork wunit(&_opCtx);
+ ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+ coll = db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(
+ coll->insertDocument(&_opCtx, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
+ id1 = coll->getCursor(&_opCtx)->next()->id;
+ ASSERT_OK(
+ coll->insertDocument(&_opCtx, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
wunit.commit();
}
- auto status = dbtests::createIndexFromSpec(&_txn,
+ auto status = dbtests::createIndexFromSpec(&_opCtx,
coll->ns().ns(),
BSON("name"
<< "a"
@@ -191,8 +193,8 @@ public:
// Remove a record, so we get more _id entries than records, and verify validate fails.
{
- WriteUnitOfWork wunit(&_txn);
- rs->deleteRecord(&_txn, id1);
+ WriteUnitOfWork wunit(&_opCtx);
+ rs->deleteRecord(&_opCtx, id1);
wunit.commit();
}
@@ -201,11 +203,11 @@ public:
// Insert two more records, so we get too few entries for a non-sparse index, and
// verify validate fails.
{
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
for (int j = 0; j < 2; j++) {
auto doc = BSON("_id" << j);
- ASSERT_OK(
- rs->insertRecord(&_txn, doc.objdata(), doc.objsize(), /*enforceQuota*/ false));
+ ASSERT_OK(rs->insertRecord(
+ &_opCtx, doc.objdata(), doc.objsize(), /*enforceQuota*/ false));
}
wunit.commit();
}
@@ -224,17 +226,20 @@ public:
Collection* coll;
RecordId id1;
{
- WriteUnitOfWork wunit(&_txn);
- ASSERT_OK(db->dropCollection(&_txn, _ns));
- coll = db->createCollection(&_txn, _ns);
- ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
- id1 = coll->getCursor(&_txn)->next()->id;
- ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
- ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 3 << "b" << 3), nullOpDebug, true));
+ WriteUnitOfWork wunit(&_opCtx);
+ ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+ coll = db->createCollection(&_opCtx, _ns);
+ ASSERT_OK(
+ coll->insertDocument(&_opCtx, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
+ id1 = coll->getCursor(&_opCtx)->next()->id;
+ ASSERT_OK(
+ coll->insertDocument(&_opCtx, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
+ ASSERT_OK(
+ coll->insertDocument(&_opCtx, BSON("_id" << 3 << "b" << 3), nullOpDebug, true));
wunit.commit();
}
- auto status = dbtests::createIndexFromSpec(&_txn,
+ auto status = dbtests::createIndexFromSpec(&_opCtx,
coll->ns().ns(),
BSON("name"
<< "a"
@@ -255,10 +260,10 @@ public:
// Update {a: 1} to {a: 9} without updating the index, so we get inconsistent values
// between the index and the document. Verify validate fails.
{
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
auto doc = BSON("_id" << 1 << "a" << 9);
auto updateStatus = rs->updateRecord(
- &_txn, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
+ &_opCtx, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
ASSERT_OK(updateStatus);
wunit.commit();
@@ -279,13 +284,13 @@ public:
Collection* coll;
RecordId id1;
{
- WriteUnitOfWork wunit(&_txn);
- ASSERT_OK(db->dropCollection(&_txn, _ns));
- coll = db->createCollection(&_txn, _ns);
+ WriteUnitOfWork wunit(&_opCtx);
+ ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+ coll = db->createCollection(&_opCtx, _ns);
- ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1), nullOpDebug, true));
- id1 = coll->getCursor(&_txn)->next()->id;
- ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2), nullOpDebug, true));
+ ASSERT_OK(coll->insertDocument(&_opCtx, BSON("_id" << 1), nullOpDebug, true));
+ id1 = coll->getCursor(&_opCtx)->next()->id;
+ ASSERT_OK(coll->insertDocument(&_opCtx, BSON("_id" << 2), nullOpDebug, true));
wunit.commit();
}
@@ -296,10 +301,10 @@ public:
// Update {_id: 1} to {_id: 9} without updating the index, so we get inconsistent values
// between the index and the document. Verify validate fails.
{
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
auto doc = BSON("_id" << 9);
auto updateStatus = rs->updateRecord(
- &_txn, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
+ &_opCtx, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
ASSERT_OK(updateStatus);
wunit.commit();
}
@@ -308,10 +313,10 @@ public:
// Revert {_id: 9} to {_id: 1} and verify that validate succeeds.
{
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
auto doc = BSON("_id" << 1);
auto updateStatus = rs->updateRecord(
- &_txn, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
+ &_opCtx, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
ASSERT_OK(updateStatus);
wunit.commit();
}
@@ -322,11 +327,12 @@ public:
// will still be the same number of index entries and documents, but one document will not
// have an index entry.
{
- WriteUnitOfWork wunit(&_txn);
- rs->deleteRecord(&_txn, id1);
+ WriteUnitOfWork wunit(&_opCtx);
+ rs->deleteRecord(&_opCtx, id1);
auto doc = BSON("_id" << 3);
- ASSERT_OK(rs->insertRecord(&_txn, doc.objdata(), doc.objsize(), /*enforceQuota*/ false)
- .getStatus());
+ ASSERT_OK(
+ rs->insertRecord(&_opCtx, doc.objdata(), doc.objsize(), /*enforceQuota*/ false)
+ .getStatus());
wunit.commit();
}
@@ -354,22 +360,22 @@ public:
// {a: [c: 1]}
auto doc3 = BSON("_id" << 3 << "a" << BSON_ARRAY(BSON("c" << 1)));
{
- WriteUnitOfWork wunit(&_txn);
- ASSERT_OK(db->dropCollection(&_txn, _ns));
- coll = db->createCollection(&_txn, _ns);
+ WriteUnitOfWork wunit(&_opCtx);
+ ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+ coll = db->createCollection(&_opCtx, _ns);
- ASSERT_OK(coll->insertDocument(&_txn, doc1, nullOpDebug, true));
- id1 = coll->getCursor(&_txn)->next()->id;
- ASSERT_OK(coll->insertDocument(&_txn, doc2, nullOpDebug, true));
- ASSERT_OK(coll->insertDocument(&_txn, doc3, nullOpDebug, true));
+ ASSERT_OK(coll->insertDocument(&_opCtx, doc1, nullOpDebug, true));
+ id1 = coll->getCursor(&_opCtx)->next()->id;
+ ASSERT_OK(coll->insertDocument(&_opCtx, doc2, nullOpDebug, true));
+ ASSERT_OK(coll->insertDocument(&_opCtx, doc3, nullOpDebug, true));
wunit.commit();
}
ASSERT_TRUE(checkValid());
// Create multi-key index.
- auto status = dbtests::createIndexFromSpec(&_txn,
+ auto status = dbtests::createIndexFromSpec(&_opCtx,
coll->ns().ns(),
BSON("name"
<< "multikey_index"
@@ -389,9 +395,9 @@ public:
// Update a document's indexed field without updating the index.
{
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
auto updateStatus = rs->updateRecord(
- &_txn, id1, doc1_b.objdata(), doc1_b.objsize(), /*enforceQuota*/ false, NULL);
+ &_opCtx, id1, doc1_b.objdata(), doc1_b.objsize(), /*enforceQuota*/ false, NULL);
ASSERT_OK(updateStatus);
wunit.commit();
}
@@ -401,9 +407,9 @@ public:
// Update a document's non-indexed field without updating the index.
// Index validation should still be valid.
{
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
auto updateStatus = rs->updateRecord(
- &_txn, id1, doc1_c.objdata(), doc1_c.objsize(), /*enforceQuota*/ false, NULL);
+ &_opCtx, id1, doc1_c.objdata(), doc1_c.objsize(), /*enforceQuota*/ false, NULL);
ASSERT_OK(updateStatus);
wunit.commit();
}
@@ -423,19 +429,22 @@ public:
Collection* coll;
RecordId id1;
{
- WriteUnitOfWork wunit(&_txn);
- ASSERT_OK(db->dropCollection(&_txn, _ns));
- coll = db->createCollection(&_txn, _ns);
-
- ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
- id1 = coll->getCursor(&_txn)->next()->id;
- ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
- ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 3 << "b" << 1), nullOpDebug, true));
+ WriteUnitOfWork wunit(&_opCtx);
+ ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+ coll = db->createCollection(&_opCtx, _ns);
+
+ ASSERT_OK(
+ coll->insertDocument(&_opCtx, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
+ id1 = coll->getCursor(&_opCtx)->next()->id;
+ ASSERT_OK(
+ coll->insertDocument(&_opCtx, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
+ ASSERT_OK(
+ coll->insertDocument(&_opCtx, BSON("_id" << 3 << "b" << 1), nullOpDebug, true));
wunit.commit();
}
// Create a sparse index.
- auto status = dbtests::createIndexFromSpec(&_txn,
+ auto status = dbtests::createIndexFromSpec(&_opCtx,
coll->ns().ns(),
BSON("name"
<< "sparse_index"
@@ -457,10 +466,10 @@ public:
// Update a document's indexed field without updating the index.
{
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
auto doc = BSON("_id" << 2 << "a" << 3);
auto updateStatus = rs->updateRecord(
- &_txn, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
+ &_opCtx, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
ASSERT_OK(updateStatus);
wunit.commit();
}
@@ -480,22 +489,24 @@ public:
Collection* coll;
RecordId id1;
{
- WriteUnitOfWork wunit(&_txn);
- ASSERT_OK(db->dropCollection(&_txn, _ns));
- coll = db->createCollection(&_txn, _ns);
-
- ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
- id1 = coll->getCursor(&_txn)->next()->id;
- ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
+ WriteUnitOfWork wunit(&_opCtx);
+ ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+ coll = db->createCollection(&_opCtx, _ns);
+
+ ASSERT_OK(
+ coll->insertDocument(&_opCtx, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
+ id1 = coll->getCursor(&_opCtx)->next()->id;
+ ASSERT_OK(
+ coll->insertDocument(&_opCtx, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
// Explicitly test that multi-key partial indexes containing documents that
// don't match the filter expression are handled correctly.
ASSERT_OK(coll->insertDocument(
- &_txn, BSON("_id" << 3 << "a" << BSON_ARRAY(-1 << -2 << -3)), nullOpDebug, true));
+ &_opCtx, BSON("_id" << 3 << "a" << BSON_ARRAY(-1 << -2 << -3)), nullOpDebug, true));
wunit.commit();
}
// Create a partial index.
- auto status = dbtests::createIndexFromSpec(&_txn,
+ auto status = dbtests::createIndexFromSpec(&_opCtx,
coll->ns().ns(),
BSON("name"
<< "partial_index"
@@ -517,10 +528,10 @@ public:
// Update an unindexed document without updating the index.
{
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
auto doc = BSON("_id" << 1);
auto updateStatus = rs->updateRecord(
- &_txn, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
+ &_opCtx, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
ASSERT_OK(updateStatus);
wunit.commit();
}
@@ -541,16 +552,16 @@ public:
Collection* coll;
RecordId id1;
{
- WriteUnitOfWork wunit(&_txn);
- ASSERT_OK(db->dropCollection(&_txn, _ns));
- coll = db->createCollection(&_txn, _ns);
+ WriteUnitOfWork wunit(&_opCtx);
+ ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+ coll = db->createCollection(&_opCtx, _ns);
ASSERT_OK(coll->insertDocument(
- &_txn, BSON("_id" << 1 << "x" << 1 << "a" << 2), nullOpDebug, true));
+ &_opCtx, BSON("_id" << 1 << "x" << 1 << "a" << 2), nullOpDebug, true));
wunit.commit();
}
// Create a partial geo index that indexes the document. This should throw an error.
- ASSERT_THROWS(dbtests::createIndexFromSpec(&_txn,
+ ASSERT_THROWS(dbtests::createIndexFromSpec(&_opCtx,
coll->ns().ns(),
BSON("name"
<< "partial_index"
@@ -568,7 +579,7 @@ public:
UserException);
// Create a partial geo index that does not index the document.
- auto status = dbtests::createIndexFromSpec(&_txn,
+ auto status = dbtests::createIndexFromSpec(&_opCtx,
coll->ns().ns(),
BSON("name"
<< "partial_index"
@@ -599,24 +610,27 @@ public:
Collection* coll;
RecordId id1;
{
- WriteUnitOfWork wunit(&_txn);
- ASSERT_OK(db->dropCollection(&_txn, _ns));
- coll = db->createCollection(&_txn, _ns);
+ WriteUnitOfWork wunit(&_opCtx);
+ ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+ coll = db->createCollection(&_opCtx, _ns);
ASSERT_OK(coll->insertDocument(
- &_txn, BSON("_id" << 1 << "a" << 1 << "b" << 4), nullOpDebug, true));
- id1 = coll->getCursor(&_txn)->next()->id;
+ &_opCtx, BSON("_id" << 1 << "a" << 1 << "b" << 4), nullOpDebug, true));
+ id1 = coll->getCursor(&_opCtx)->next()->id;
ASSERT_OK(coll->insertDocument(
- &_txn, BSON("_id" << 2 << "a" << 2 << "b" << 5), nullOpDebug, true));
- ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 3 << "a" << 3), nullOpDebug, true));
- ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 4 << "b" << 6), nullOpDebug, true));
- ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 5 << "c" << 7), nullOpDebug, true));
+ &_opCtx, BSON("_id" << 2 << "a" << 2 << "b" << 5), nullOpDebug, true));
+ ASSERT_OK(
+ coll->insertDocument(&_opCtx, BSON("_id" << 3 << "a" << 3), nullOpDebug, true));
+ ASSERT_OK(
+ coll->insertDocument(&_opCtx, BSON("_id" << 4 << "b" << 6), nullOpDebug, true));
+ ASSERT_OK(
+ coll->insertDocument(&_opCtx, BSON("_id" << 5 << "c" << 7), nullOpDebug, true));
wunit.commit();
}
// Create two compound indexes, one forward and one reverse, to test
// validate()'s index direction parsing.
- auto status = dbtests::createIndexFromSpec(&_txn,
+ auto status = dbtests::createIndexFromSpec(&_opCtx,
coll->ns().ns(),
BSON("name"
<< "compound_index_1"
@@ -630,7 +644,7 @@ public:
<< false));
ASSERT_OK(status);
- status = dbtests::createIndexFromSpec(&_txn,
+ status = dbtests::createIndexFromSpec(&_opCtx,
coll->ns().ns(),
BSON("name"
<< "compound_index_2"
@@ -650,10 +664,10 @@ public:
// Update a document's indexed field without updating the index.
{
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
auto doc = BSON("_id" << 1 << "a" << 1 << "b" << 3);
auto updateStatus = rs->updateRecord(
- &_txn, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
+ &_opCtx, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL);
ASSERT_OK(updateStatus);
wunit.commit();
}
@@ -673,20 +687,23 @@ public:
Collection* coll;
RecordId id1;
{
- WriteUnitOfWork wunit(&_txn);
- ASSERT_OK(db->dropCollection(&_txn, _ns));
- coll = db->createCollection(&_txn, _ns);
-
- ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
- id1 = coll->getCursor(&_txn)->next()->id;
- ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
- ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 3 << "b" << 1), nullOpDebug, true));
+ WriteUnitOfWork wunit(&_opCtx);
+ ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+ coll = db->createCollection(&_opCtx, _ns);
+
+ ASSERT_OK(
+ coll->insertDocument(&_opCtx, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
+ id1 = coll->getCursor(&_opCtx)->next()->id;
+ ASSERT_OK(
+ coll->insertDocument(&_opCtx, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
+ ASSERT_OK(
+ coll->insertDocument(&_opCtx, BSON("_id" << 3 << "b" << 1), nullOpDebug, true));
wunit.commit();
}
const std::string indexName = "bad_index";
auto status = dbtests::createIndexFromSpec(
- &_txn,
+ &_opCtx,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "v"
<< static_cast<int>(kIndexVersion)
@@ -698,11 +715,11 @@ public:
// Replace a correct index entry with a bad one and check it's invalid.
IndexCatalog* indexCatalog = coll->getIndexCatalog();
- IndexDescriptor* descriptor = indexCatalog->findIndexByName(&_txn, indexName);
+ IndexDescriptor* descriptor = indexCatalog->findIndexByName(&_opCtx, indexName);
IndexAccessMethod* iam = indexCatalog->getIndex(descriptor);
{
- WriteUnitOfWork wunit(&_txn);
+ WriteUnitOfWork wunit(&_opCtx);
int64_t numDeleted;
int64_t numInserted;
const BSONObj actualKey = BSON("a" << 1);
@@ -710,8 +727,8 @@ public:
InsertDeleteOptions options;
options.dupsAllowed = true;
options.logIfError = true;
- auto removeStatus = iam->remove(&_txn, actualKey, id1, options, &numDeleted);
- auto insertStatus = iam->insert(&_txn, badKey, id1, options, &numInserted);
+ auto removeStatus = iam->remove(&_opCtx, actualKey, id1, options, &numDeleted);
+ auto insertStatus = iam->insert(&_opCtx, badKey, id1, options, &numInserted);
ASSERT_EQUALS(numDeleted, 1);
ASSERT_EQUALS(numInserted, 1);
@@ -735,20 +752,23 @@ public:
Collection* coll;
RecordId id1;
{
- WriteUnitOfWork wunit(&_txn);
- ASSERT_OK(db->dropCollection(&_txn, _ns));
- coll = db->createCollection(&_txn, _ns);
-
- ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
- id1 = coll->getCursor(&_txn)->next()->id;
- ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
- ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 3 << "b" << 1), nullOpDebug, true));
+ WriteUnitOfWork wunit(&_opCtx);
+ ASSERT_OK(db->dropCollection(&_opCtx, _ns));
+ coll = db->createCollection(&_opCtx, _ns);
+
+ ASSERT_OK(
+ coll->insertDocument(&_opCtx, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
+ id1 = coll->getCursor(&_opCtx)->next()->id;
+ ASSERT_OK(
+ coll->insertDocument(&_opCtx, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
+ ASSERT_OK(
+ coll->insertDocument(&_opCtx, BSON("_id" << 3 << "b" << 1), nullOpDebug, true));
wunit.commit();
}
const std::string indexName = "bad_index";
auto status = dbtests::createIndexFromSpec(
- &_txn,
+ &_opCtx,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "v"
<< static_cast<int>(kIndexVersion)
@@ -761,7 +781,7 @@ public:
// Change the IndexDescriptor's keyPattern to descending so the index ordering
// appears wrong.
IndexCatalog* indexCatalog = coll->getIndexCatalog();
- IndexDescriptor* descriptor = indexCatalog->findIndexByName(&_txn, indexName);
+ IndexDescriptor* descriptor = indexCatalog->findIndexByName(&_opCtx, indexName);
descriptor->setKeyPatternForTest(BSON("a" << -1));
ASSERT_FALSE(checkValid());
diff --git a/src/mongo/executor/network_interface_asio.cpp b/src/mongo/executor/network_interface_asio.cpp
index 4b2e8df7428..8fb4733bbb6 100644
--- a/src/mongo/executor/network_interface_asio.cpp
+++ b/src/mongo/executor/network_interface_asio.cpp
@@ -232,7 +232,7 @@ Status attachMetadataIfNeeded(RemoteCommandRequest& request,
auto writeStatus = callNoexcept(*metadataHook,
&rpc::EgressMetadataHook::writeRequestMetadata,
- request.txn,
+ request.opCtx,
request.target,
&augmentedBob);
if (!writeStatus.isOK()) {
diff --git a/src/mongo/executor/network_interface_asio_test.cpp b/src/mongo/executor/network_interface_asio_test.cpp
index d2a1d342915..f7e6d4a091e 100644
--- a/src/mongo/executor/network_interface_asio_test.cpp
+++ b/src/mongo/executor/network_interface_asio_test.cpp
@@ -988,7 +988,7 @@ public:
TestMetadataHook(bool* wroteRequestMetadata, bool* gotReplyMetadata)
: _wroteRequestMetadata(wroteRequestMetadata), _gotReplyMetadata(gotReplyMetadata) {}
- Status writeRequestMetadata(OperationContext* txn,
+ Status writeRequestMetadata(OperationContext* opCtx,
const HostAndPort& requestDestination,
BSONObjBuilder* metadataBob) override {
metadataBob->append("foo", "bar");
diff --git a/src/mongo/executor/remote_command_request.cpp b/src/mongo/executor/remote_command_request.cpp
index 44834b2abc4..e5362f17386 100644
--- a/src/mongo/executor/remote_command_request.cpp
+++ b/src/mongo/executor/remote_command_request.cpp
@@ -56,28 +56,28 @@ RemoteCommandRequest::RemoteCommandRequest(RequestId requestId,
const std::string& theDbName,
const BSONObj& theCmdObj,
const BSONObj& metadataObj,
- OperationContext* txn,
+ OperationContext* opCtx,
Milliseconds timeoutMillis)
: id(requestId),
target(theTarget),
dbname(theDbName),
metadata(metadataObj),
cmdObj(theCmdObj),
- txn(txn),
+ opCtx(opCtx),
timeout(timeoutMillis) {}
RemoteCommandRequest::RemoteCommandRequest(const HostAndPort& theTarget,
const std::string& theDbName,
const BSONObj& theCmdObj,
const BSONObj& metadataObj,
- OperationContext* txn,
+ OperationContext* opCtx,
Milliseconds timeoutMillis)
: RemoteCommandRequest(requestIdCounter.addAndFetch(1),
theTarget,
theDbName,
theCmdObj,
metadataObj,
- txn,
+ opCtx,
timeoutMillis) {}
std::string RemoteCommandRequest::toString() const {
diff --git a/src/mongo/executor/remote_command_request.h b/src/mongo/executor/remote_command_request.h
index c3c37777948..e2e16b95261 100644
--- a/src/mongo/executor/remote_command_request.h
+++ b/src/mongo/executor/remote_command_request.h
@@ -60,33 +60,33 @@ struct RemoteCommandRequest {
const std::string& theDbName,
const BSONObj& theCmdObj,
const BSONObj& metadataObj,
- OperationContext* txn,
+ OperationContext* opCtx,
Milliseconds timeoutMillis);
RemoteCommandRequest(const HostAndPort& theTarget,
const std::string& theDbName,
const BSONObj& theCmdObj,
const BSONObj& metadataObj,
- OperationContext* txn,
+ OperationContext* opCtx,
Milliseconds timeoutMillis = kNoTimeout);
RemoteCommandRequest(const HostAndPort& theTarget,
const std::string& theDbName,
const BSONObj& theCmdObj,
- OperationContext* txn,
+ OperationContext* opCtx,
Milliseconds timeoutMillis = kNoTimeout)
: RemoteCommandRequest(
- theTarget, theDbName, theCmdObj, rpc::makeEmptyMetadata(), txn, timeoutMillis) {}
+ theTarget, theDbName, theCmdObj, rpc::makeEmptyMetadata(), opCtx, timeoutMillis) {}
RemoteCommandRequest(const HostAndPort& theTarget,
const rpc::RequestInterface& request,
- OperationContext* txn,
+ OperationContext* opCtx,
Milliseconds timeoutMillis = kNoTimeout)
: RemoteCommandRequest(theTarget,
request.getDatabase().toString(),
request.getCommandArgs(),
request.getMetadata(),
- txn,
+ opCtx,
timeoutMillis) {}
std::string toString() const;
@@ -109,7 +109,7 @@ struct RemoteCommandRequest {
// NetworkInterfaces that do user work (i.e. reads, and writes) so that audit and client
// metadata is propagated. It is allowed to be null if used on NetworkInterfaces without
// metadata attachment (i.e., replication).
- OperationContext* txn{nullptr};
+ OperationContext* opCtx{nullptr};
Milliseconds timeout = kNoTimeout;
diff --git a/src/mongo/executor/task_executor.cpp b/src/mongo/executor/task_executor.cpp
index a0ceb239591..22b17bcf772 100644
--- a/src/mongo/executor/task_executor.cpp
+++ b/src/mongo/executor/task_executor.cpp
@@ -57,7 +57,7 @@ TaskExecutor::CallbackArgs::CallbackArgs(TaskExecutor* theExecutor,
: executor(theExecutor),
myHandle(std::move(theHandle)),
status(std::move(theStatus)),
- txn(theTxn) {}
+ opCtx(theTxn) {}
TaskExecutor::RemoteCommandCallbackArgs::RemoteCommandCallbackArgs(
diff --git a/src/mongo/executor/task_executor.h b/src/mongo/executor/task_executor.h
index 2d558512f91..a17b6482451 100644
--- a/src/mongo/executor/task_executor.h
+++ b/src/mongo/executor/task_executor.h
@@ -390,12 +390,12 @@ struct TaskExecutor::CallbackArgs {
CallbackArgs(TaskExecutor* theExecutor,
CallbackHandle theHandle,
Status theStatus,
- OperationContext* txn = NULL);
+ OperationContext* opCtx = NULL);
TaskExecutor* executor;
CallbackHandle myHandle;
Status status;
- OperationContext* txn;
+ OperationContext* opCtx;
};
/**
diff --git a/src/mongo/rpc/metadata.cpp b/src/mongo/rpc/metadata.cpp
index 00a9d58dc9f..00d3c01ae78 100644
--- a/src/mongo/rpc/metadata.cpp
+++ b/src/mongo/rpc/metadata.cpp
@@ -46,7 +46,7 @@ BSONObj makeEmptyMetadata() {
return BSONObj();
}
-Status readRequestMetadata(OperationContext* txn, const BSONObj& metadataObj) {
+Status readRequestMetadata(OperationContext* opCtx, const BSONObj& metadataObj) {
BSONElement ssmElem;
BSONElement auditElem;
BSONElement configSvrElem;
@@ -72,16 +72,16 @@ Status readRequestMetadata(OperationContext* txn, const BSONObj& metadataObj) {
if (!swServerSelectionMetadata.isOK()) {
return swServerSelectionMetadata.getStatus();
}
- ServerSelectionMetadata::get(txn) = std::move(swServerSelectionMetadata.getValue());
+ ServerSelectionMetadata::get(opCtx) = std::move(swServerSelectionMetadata.getValue());
auto swAuditMetadata = AuditMetadata::readFromMetadata(auditElem);
if (!swAuditMetadata.isOK()) {
return swAuditMetadata.getStatus();
}
- AuditMetadata::get(txn) = std::move(swAuditMetadata.getValue());
+ AuditMetadata::get(opCtx) = std::move(swAuditMetadata.getValue());
const auto statusClientMetadata =
- ClientMetadataIsMasterState::readFromMetadata(txn, clientElem);
+ ClientMetadataIsMasterState::readFromMetadata(opCtx, clientElem);
if (!statusClientMetadata.isOK()) {
return statusClientMetadata;
}
@@ -90,19 +90,19 @@ Status readRequestMetadata(OperationContext* txn, const BSONObj& metadataObj) {
if (!configServerMetadata.isOK()) {
return configServerMetadata.getStatus();
}
- ConfigServerMetadata::get(txn) = std::move(configServerMetadata.getValue());
+ ConfigServerMetadata::get(opCtx) = std::move(configServerMetadata.getValue());
auto trackingMetadata = TrackingMetadata::readFromMetadata(trackingElem);
if (!trackingMetadata.isOK()) {
return trackingMetadata.getStatus();
}
- TrackingMetadata::get(txn) = std::move(trackingMetadata.getValue());
+ TrackingMetadata::get(opCtx) = std::move(trackingMetadata.getValue());
return Status::OK();
}
-Status writeRequestMetadata(OperationContext* txn, BSONObjBuilder* metadataBob) {
- auto ssStatus = ServerSelectionMetadata::get(txn).writeToMetadata(metadataBob);
+Status writeRequestMetadata(OperationContext* opCtx, BSONObjBuilder* metadataBob) {
+ auto ssStatus = ServerSelectionMetadata::get(opCtx).writeToMetadata(metadataBob);
if (!ssStatus.isOK()) {
return ssStatus;
}
diff --git a/src/mongo/rpc/metadata.h b/src/mongo/rpc/metadata.h
index 9af7bfdfeb5..d8956de30f7 100644
--- a/src/mongo/rpc/metadata.h
+++ b/src/mongo/rpc/metadata.h
@@ -53,12 +53,12 @@ BSONObj makeEmptyMetadata();
/**
* Reads metadata from a metadata object and sets it on this OperationContext.
*/
-Status readRequestMetadata(OperationContext* txn, const BSONObj& metadataObj);
+Status readRequestMetadata(OperationContext* opCtx, const BSONObj& metadataObj);
/**
* Writes metadata from an OperationContext to a metadata object.
*/
-Status writeRequestMetadata(OperationContext* txn, BSONObjBuilder* metadataBob);
+Status writeRequestMetadata(OperationContext* opCtx, BSONObjBuilder* metadataBob);
/**
* A command object and a corresponding metadata object.
diff --git a/src/mongo/rpc/metadata/client_metadata_ismaster.cpp b/src/mongo/rpc/metadata/client_metadata_ismaster.cpp
index 234f61c78e8..3eb19853417 100644
--- a/src/mongo/rpc/metadata/client_metadata_ismaster.cpp
+++ b/src/mongo/rpc/metadata/client_metadata_ismaster.cpp
@@ -74,7 +74,8 @@ void ClientMetadataIsMasterState::setClientMetadata(
}
-Status ClientMetadataIsMasterState::readFromMetadata(OperationContext* txn, BSONElement& element) {
+Status ClientMetadataIsMasterState::readFromMetadata(OperationContext* opCtx,
+ BSONElement& element) {
if (element.eoo()) {
return Status::OK();
}
@@ -85,23 +86,24 @@ Status ClientMetadataIsMasterState::readFromMetadata(OperationContext* txn, BSON
return swParseClientMetadata.getStatus();
}
- auto& clientMetadataIsMasterState = ClientMetadataIsMasterState::get(txn->getClient());
+ auto& clientMetadataIsMasterState = ClientMetadataIsMasterState::get(opCtx->getClient());
- clientMetadataIsMasterState.setClientMetadata(txn->getClient(),
+ clientMetadataIsMasterState.setClientMetadata(opCtx->getClient(),
std::move(swParseClientMetadata.getValue()));
return Status::OK();
}
-void ClientMetadataIsMasterState::writeToMetadata(OperationContext* txn, BSONObjBuilder* builder) {
+void ClientMetadataIsMasterState::writeToMetadata(OperationContext* opCtx,
+ BSONObjBuilder* builder) {
// We may be asked to write metadata on background threads that are not associated with an
// operation context
- if (!txn) {
+ if (!opCtx) {
return;
}
const auto& clientMetadata =
- ClientMetadataIsMasterState::get(txn->getClient()).getClientMetadata();
+ ClientMetadataIsMasterState::get(opCtx->getClient()).getClientMetadata();
// Skip appending metadata if there is none
if (!clientMetadata || clientMetadata.get().getDocument().isEmpty()) {
diff --git a/src/mongo/rpc/metadata/client_metadata_ismaster.h b/src/mongo/rpc/metadata/client_metadata_ismaster.h
index 305018343d0..b4e38e7e430 100644
--- a/src/mongo/rpc/metadata/client_metadata_ismaster.h
+++ b/src/mongo/rpc/metadata/client_metadata_ismaster.h
@@ -79,7 +79,7 @@ public:
* Thread-Safety:
* None - must be only be read and written from the thread owning "Client".
*/
- static Status readFromMetadata(OperationContext* txn, BSONElement& elem);
+ static Status readFromMetadata(OperationContext* opCtx, BSONElement& elem);
/**
* Write the $client section to OP_Command's metadata if there is a non-empty client metadata
@@ -88,7 +88,7 @@ public:
* Thread-Safety:
* None - must be only be read and written from the thread owning "Client".
*/
- static void writeToMetadata(OperationContext* txn, BSONObjBuilder* builder);
+ static void writeToMetadata(OperationContext* opCtx, BSONObjBuilder* builder);
private:
// Optional client metadata document.
diff --git a/src/mongo/rpc/metadata/egress_metadata_hook_list.cpp b/src/mongo/rpc/metadata/egress_metadata_hook_list.cpp
index 70c90dde0cf..5e11c84eb60 100644
--- a/src/mongo/rpc/metadata/egress_metadata_hook_list.cpp
+++ b/src/mongo/rpc/metadata/egress_metadata_hook_list.cpp
@@ -40,11 +40,11 @@ void EgressMetadataHookList::addHook(std::unique_ptr<EgressMetadataHook>&& newHo
_hooks.emplace_back(std::forward<std::unique_ptr<EgressMetadataHook>>(newHook));
}
-Status EgressMetadataHookList::writeRequestMetadata(OperationContext* txn,
+Status EgressMetadataHookList::writeRequestMetadata(OperationContext* opCtx,
const HostAndPort& requestDestination,
BSONObjBuilder* metadataBob) {
for (auto&& hook : _hooks) {
- auto status = hook->writeRequestMetadata(txn, requestDestination, metadataBob);
+ auto status = hook->writeRequestMetadata(opCtx, requestDestination, metadataBob);
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/rpc/metadata/egress_metadata_hook_list.h b/src/mongo/rpc/metadata/egress_metadata_hook_list.h
index 7a3542c1084..54b03b80c5e 100644
--- a/src/mongo/rpc/metadata/egress_metadata_hook_list.h
+++ b/src/mongo/rpc/metadata/egress_metadata_hook_list.h
@@ -58,7 +58,7 @@ public:
* early if one of hooks returned a non OK status and return it. Note that metadataBob should
* not be used if Status is not OK as the contents can be partial.
*/
- Status writeRequestMetadata(OperationContext* txn,
+ Status writeRequestMetadata(OperationContext* opCtx,
const HostAndPort& requestDestination,
BSONObjBuilder* metadataBob) override;
diff --git a/src/mongo/rpc/metadata/egress_metadata_hook_list_test.cpp b/src/mongo/rpc/metadata/egress_metadata_hook_list_test.cpp
index a40fcdc57db..159a78412fb 100644
--- a/src/mongo/rpc/metadata/egress_metadata_hook_list_test.cpp
+++ b/src/mongo/rpc/metadata/egress_metadata_hook_list_test.cpp
@@ -51,7 +51,7 @@ class TestHook : public EgressMetadataHook {
public:
TestHook(string fieldName, ReadReplyArgs* arg) : _fieldName(fieldName), _arg(arg) {}
- Status writeRequestMetadata(OperationContext* txn,
+ Status writeRequestMetadata(OperationContext* opCtx,
const HostAndPort& requestDestination,
BSONObjBuilder* metadataBob) override {
metadataBob->append(_fieldName, requestDestination.toString());
@@ -74,7 +74,7 @@ class FixedStatusTestHook : public EgressMetadataHook {
public:
FixedStatusTestHook(Status status) : _toRet(status) {}
- Status writeRequestMetadata(OperationContext* txn,
+ Status writeRequestMetadata(OperationContext* opCtx,
const HostAndPort& requestDestination,
BSONObjBuilder* metadataBob) override {
return _toRet;
diff --git a/src/mongo/rpc/metadata/metadata_hook.h b/src/mongo/rpc/metadata/metadata_hook.h
index 2369fb8d47a..81f0001f548 100644
--- a/src/mongo/rpc/metadata/metadata_hook.h
+++ b/src/mongo/rpc/metadata/metadata_hook.h
@@ -54,10 +54,11 @@ public:
* Writes to an outgoing request metadata object. This method must not throw or block on
* database or network operations and can be called by multiple concurrent threads.
*
- * txn may be null as writeRequestMetadata may be called on ASIO background threads, and may not
+ * opCtx may be null as writeRequestMetadata may be called on ASIO background threads, and may
+ * not
* have an OperationContext as a result.
*/
- virtual Status writeRequestMetadata(OperationContext* txn,
+ virtual Status writeRequestMetadata(OperationContext* opCtx,
const HostAndPort& requestDestination,
BSONObjBuilder* metadataBob) = 0;
diff --git a/src/mongo/rpc/protocol.h b/src/mongo/rpc/protocol.h
index 6ac205ccc2f..50392fedcd0 100644
--- a/src/mongo/rpc/protocol.h
+++ b/src/mongo/rpc/protocol.h
@@ -81,12 +81,12 @@ const ProtocolSet kAll = kOpQueryOnly | kOpCommandOnly;
/**
* Returns the protocol used to initiate the current operation.
*/
-Protocol getOperationProtocol(OperationContext* txn);
+Protocol getOperationProtocol(OperationContext* opCtx);
/**
* Sets the protocol used to initiate the current operation.
*/
-void setOperationProtocol(OperationContext* txn, Protocol protocol);
+void setOperationProtocol(OperationContext* opCtx, Protocol protocol);
/**
* Returns the newest protocol supported by two parties.
diff --git a/src/mongo/s/async_requests_sender.cpp b/src/mongo/s/async_requests_sender.cpp
index 3c3cd194f78..1ec65430f75 100644
--- a/src/mongo/s/async_requests_sender.cpp
+++ b/src/mongo/s/async_requests_sender.cpp
@@ -51,7 +51,7 @@ const int kMaxNumFailedHostRetryAttempts = 3;
} // namespace
-AsyncRequestsSender::AsyncRequestsSender(OperationContext* txn,
+AsyncRequestsSender::AsyncRequestsSender(OperationContext* opCtx,
executor::TaskExecutor* executor,
StringData db,
const std::vector<AsyncRequestsSender::Request>& requests,
@@ -73,7 +73,7 @@ AsyncRequestsSender::AsyncRequestsSender(OperationContext* txn,
_metadataObj = metadataBuilder.obj();
// Schedule the requests immediately.
- _scheduleRequestsIfNeeded(txn);
+ _scheduleRequestsIfNeeded(opCtx);
}
AsyncRequestsSender::~AsyncRequestsSender() {
@@ -81,7 +81,7 @@ AsyncRequestsSender::~AsyncRequestsSender() {
}
std::vector<AsyncRequestsSender::Response> AsyncRequestsSender::waitForResponses(
- OperationContext* txn) {
+ OperationContext* opCtx) {
invariant(!_remotes.empty());
// Until all remotes have received a response or error, keep scheduling retries and waiting on
@@ -91,7 +91,7 @@ std::vector<AsyncRequestsSender::Response> AsyncRequestsSender::waitForResponses
// Note: if we have been interrupt()'d or if some remote had a non-retriable error and
// allowPartialResults is false, no retries will be scheduled.
- _scheduleRequestsIfNeeded(txn);
+ _scheduleRequestsIfNeeded(opCtx);
}
// Construct the responses.
@@ -152,7 +152,7 @@ bool AsyncRequestsSender::_done_inlock() {
* 2. Remotes that already successfully received a response will have a non-empty 'response'.
* 3. Remotes that have reached maximum retries will have an error status.
*/
-void AsyncRequestsSender::_scheduleRequestsIfNeeded(OperationContext* txn) {
+void AsyncRequestsSender::_scheduleRequestsIfNeeded(OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
// We can't make a new notification if there was a previous one that has not been signaled.
@@ -175,7 +175,7 @@ void AsyncRequestsSender::_scheduleRequestsIfNeeded(OperationContext* txn) {
// If we have not yet received a response or error for this remote, and we do not have an
// outstanding request for this remote, schedule remote work to send the command.
if (!remote.swResponse && !remote.cbHandle.isValid()) {
- auto scheduleStatus = _scheduleRequest_inlock(txn, i);
+ auto scheduleStatus = _scheduleRequest_inlock(opCtx, i);
if (!scheduleStatus.isOK()) {
// Being unable to schedule a request to a remote is a non-retriable error.
remote.swResponse = std::move(scheduleStatus);
@@ -191,7 +191,7 @@ void AsyncRequestsSender::_scheduleRequestsIfNeeded(OperationContext* txn) {
}
}
-Status AsyncRequestsSender::_scheduleRequest_inlock(OperationContext* txn, size_t remoteIndex) {
+Status AsyncRequestsSender::_scheduleRequest_inlock(OperationContext* opCtx, size_t remoteIndex) {
auto& remote = _remotes[remoteIndex];
invariant(!remote.cbHandle.isValid());
@@ -203,12 +203,15 @@ Status AsyncRequestsSender::_scheduleRequest_inlock(OperationContext* txn, size_
}
executor::RemoteCommandRequest request(
- remote.getTargetHost(), _db.toString(), remote.cmdObj, _metadataObj, txn);
-
- auto callbackStatus = _executor->scheduleRemoteCommand(
- request,
- stdx::bind(
- &AsyncRequestsSender::_handleResponse, this, stdx::placeholders::_1, txn, remoteIndex));
+ remote.getTargetHost(), _db.toString(), remote.cmdObj, _metadataObj, opCtx);
+
+ auto callbackStatus =
+ _executor->scheduleRemoteCommand(request,
+ stdx::bind(&AsyncRequestsSender::_handleResponse,
+ this,
+ stdx::placeholders::_1,
+ opCtx,
+ remoteIndex));
if (!callbackStatus.isOK()) {
return callbackStatus.getStatus();
}
@@ -219,7 +222,7 @@ Status AsyncRequestsSender::_scheduleRequest_inlock(OperationContext* txn, size_
void AsyncRequestsSender::_handleResponse(
const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData,
- OperationContext* txn,
+ OperationContext* opCtx,
size_t remoteIndex) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
diff --git a/src/mongo/s/async_requests_sender.h b/src/mongo/s/async_requests_sender.h
index daf48c558aa..9f8664f9c2a 100644
--- a/src/mongo/s/async_requests_sender.h
+++ b/src/mongo/s/async_requests_sender.h
@@ -55,8 +55,9 @@ namespace mongo {
*
* Typical usage is:
*
- * AsyncRequestsSender ars(txn, executor, db, requests, readPrefSetting); // schedule the requests
- * auto responses = ars.waitForResponses(txn); // wait for responses; retries on retriable erors
+ * AsyncRequestsSender ars(opCtx, executor, db, requests, readPrefSetting); // schedule the
+ * requests
+ * auto responses = ars.waitForResponses(opCtx); // wait for responses; retries on retriable erors
*
* Additionally, you can interrupt() (if you want waitForResponses() to wait for responses for
* outstanding requests but stop scheduling retries) or kill() (if you want to cancel outstanding
@@ -103,7 +104,7 @@ public:
* Constructs a new AsyncRequestsSender. The TaskExecutor* must remain valid for the lifetime of
* the ARS.
*/
- AsyncRequestsSender(OperationContext* txn,
+ AsyncRequestsSender(OperationContext* opCtx,
executor::TaskExecutor* executor,
StringData db,
const std::vector<AsyncRequestsSender::Request>& requests,
@@ -122,7 +123,7 @@ public:
*
* Must only be called once.
*/
- std::vector<Response> waitForResponses(OperationContext* txn);
+ std::vector<Response> waitForResponses(OperationContext* opCtx);
/**
* Stops the ARS from retrying requests. Causes waitForResponses() to wait until any outstanding
@@ -161,7 +162,7 @@ private:
*
* Invalid to call if there is an existing Notification and it has not yet been signaled.
*/
- void _scheduleRequestsIfNeeded(OperationContext* txn);
+ void _scheduleRequestsIfNeeded(OperationContext* opCtx);
/**
* Helper to schedule a command to a remote.
@@ -171,7 +172,7 @@ private:
*
* Returns success if the command to retrieve the next batch was scheduled successfully.
*/
- Status _scheduleRequest_inlock(OperationContext* txn, size_t remoteIndex);
+ Status _scheduleRequest_inlock(OperationContext* opCtx, size_t remoteIndex);
/**
* The callback for a remote command.
@@ -185,7 +186,7 @@ private:
* On a non-retriable error, if allowPartialResults is false, sets _stopRetrying to true.
*/
void _handleResponse(const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData,
- OperationContext* txn,
+ OperationContext* opCtx,
size_t remoteIndex);
/**
diff --git a/src/mongo/s/balancer_configuration.cpp b/src/mongo/s/balancer_configuration.cpp
index 79098a2ec4a..b332444a389 100644
--- a/src/mongo/s/balancer_configuration.cpp
+++ b/src/mongo/s/balancer_configuration.cpp
@@ -77,10 +77,10 @@ BalancerSettingsType::BalancerMode BalancerConfiguration::getBalancerMode() cons
return _balancerSettings.getMode();
}
-Status BalancerConfiguration::setBalancerMode(OperationContext* txn,
+Status BalancerConfiguration::setBalancerMode(OperationContext* opCtx,
BalancerSettingsType::BalancerMode mode) {
- auto updateStatus = Grid::get(txn)->catalogClient(txn)->updateConfigDocument(
- txn,
+ auto updateStatus = Grid::get(opCtx)->catalogClient(opCtx)->updateConfigDocument(
+ opCtx,
kSettingsNamespace.ns(),
BSON("_id" << BalancerSettingsType::kKey),
BSON("$set" << BSON(kStopped << (mode == BalancerSettingsType::kOff) << kMode
@@ -88,7 +88,7 @@ Status BalancerConfiguration::setBalancerMode(OperationContext* txn,
true,
ShardingCatalogClient::kMajorityWriteConcern);
- Status refreshStatus = refreshAndCheck(txn);
+ Status refreshStatus = refreshAndCheck(opCtx);
if (!refreshStatus.isOK()) {
return refreshStatus;
}
@@ -131,9 +131,9 @@ bool BalancerConfiguration::waitForDelete() const {
return _balancerSettings.waitForDelete();
}
-Status BalancerConfiguration::refreshAndCheck(OperationContext* txn) {
+Status BalancerConfiguration::refreshAndCheck(OperationContext* opCtx) {
// Balancer configuration
- Status balancerSettingsStatus = _refreshBalancerSettings(txn);
+ Status balancerSettingsStatus = _refreshBalancerSettings(opCtx);
if (!balancerSettingsStatus.isOK()) {
return {balancerSettingsStatus.code(),
str::stream() << "Failed to refresh the balancer settings due to "
@@ -141,7 +141,7 @@ Status BalancerConfiguration::refreshAndCheck(OperationContext* txn) {
}
// Chunk size settings
- Status chunkSizeStatus = _refreshChunkSizeSettings(txn);
+ Status chunkSizeStatus = _refreshChunkSizeSettings(opCtx);
if (!chunkSizeStatus.isOK()) {
return {chunkSizeStatus.code(),
str::stream() << "Failed to refresh the chunk sizes settings due to "
@@ -149,7 +149,7 @@ Status BalancerConfiguration::refreshAndCheck(OperationContext* txn) {
}
// AutoSplit settings
- Status autoSplitStatus = _refreshAutoSplitSettings(txn);
+ Status autoSplitStatus = _refreshAutoSplitSettings(opCtx);
if (!autoSplitStatus.isOK()) {
return {autoSplitStatus.code(),
str::stream() << "Failed to refresh the autoSplit settings due to "
@@ -159,11 +159,11 @@ Status BalancerConfiguration::refreshAndCheck(OperationContext* txn) {
return Status::OK();
}
-Status BalancerConfiguration::_refreshBalancerSettings(OperationContext* txn) {
+Status BalancerConfiguration::_refreshBalancerSettings(OperationContext* opCtx) {
BalancerSettingsType settings = BalancerSettingsType::createDefault();
- auto settingsObjStatus =
- Grid::get(txn)->catalogClient(txn)->getGlobalSettings(txn, BalancerSettingsType::kKey);
+ auto settingsObjStatus = Grid::get(opCtx)->catalogClient(opCtx)->getGlobalSettings(
+ opCtx, BalancerSettingsType::kKey);
if (settingsObjStatus.isOK()) {
auto settingsStatus = BalancerSettingsType::fromBSON(settingsObjStatus.getValue());
if (!settingsStatus.isOK()) {
@@ -181,11 +181,11 @@ Status BalancerConfiguration::_refreshBalancerSettings(OperationContext* txn) {
return Status::OK();
}
-Status BalancerConfiguration::_refreshChunkSizeSettings(OperationContext* txn) {
+Status BalancerConfiguration::_refreshChunkSizeSettings(OperationContext* opCtx) {
ChunkSizeSettingsType settings = ChunkSizeSettingsType::createDefault();
auto settingsObjStatus =
- grid.catalogClient(txn)->getGlobalSettings(txn, ChunkSizeSettingsType::kKey);
+ grid.catalogClient(opCtx)->getGlobalSettings(opCtx, ChunkSizeSettingsType::kKey);
if (settingsObjStatus.isOK()) {
auto settingsStatus = ChunkSizeSettingsType::fromBSON(settingsObjStatus.getValue());
if (!settingsStatus.isOK()) {
@@ -207,11 +207,11 @@ Status BalancerConfiguration::_refreshChunkSizeSettings(OperationContext* txn) {
return Status::OK();
}
-Status BalancerConfiguration::_refreshAutoSplitSettings(OperationContext* txn) {
+Status BalancerConfiguration::_refreshAutoSplitSettings(OperationContext* opCtx) {
AutoSplitSettingsType settings = AutoSplitSettingsType::createDefault();
auto settingsObjStatus =
- grid.catalogClient(txn)->getGlobalSettings(txn, AutoSplitSettingsType::kKey);
+ grid.catalogClient(opCtx)->getGlobalSettings(opCtx, AutoSplitSettingsType::kKey);
if (settingsObjStatus.isOK()) {
auto settingsStatus = AutoSplitSettingsType::fromBSON(settingsObjStatus.getValue());
if (!settingsStatus.isOK()) {
diff --git a/src/mongo/s/balancer_configuration.h b/src/mongo/s/balancer_configuration.h
index 2f5370d162c..71a3832f537 100644
--- a/src/mongo/s/balancer_configuration.h
+++ b/src/mongo/s/balancer_configuration.h
@@ -214,7 +214,7 @@ public:
/**
* Synchronous method, which writes the balancer mode to the configuration data.
*/
- Status setBalancerMode(OperationContext* txn, BalancerSettingsType::BalancerMode mode);
+ Status setBalancerMode(OperationContext* opCtx, BalancerSettingsType::BalancerMode mode);
/**
* Returns whether balancing is allowed based on both the enabled state of the balancer and the
@@ -254,27 +254,27 @@ public:
* This method is thread-safe but it doesn't make sense to be called from more than one thread
* at a time.
*/
- Status refreshAndCheck(OperationContext* txn);
+ Status refreshAndCheck(OperationContext* opCtx);
private:
/**
* Reloads the balancer configuration from the settings document. Fails if the settings document
* cannot be read, in which case the values will remain unchanged.
*/
- Status _refreshBalancerSettings(OperationContext* txn);
+ Status _refreshBalancerSettings(OperationContext* opCtx);
/**
* Reloads the chunk sizes configuration from the settings document. Fails if the settings
* document cannot be read or if any setting contains invalid value, in which case the offending
* value will remain unchanged.
*/
- Status _refreshChunkSizeSettings(OperationContext* txn);
+ Status _refreshChunkSizeSettings(OperationContext* opCtx);
/**
* Reloads the autosplit configuration from the settings document. Fails if the settings
* document cannot be read.
*/
- Status _refreshAutoSplitSettings(OperationContext* txn);
+ Status _refreshAutoSplitSettings(OperationContext* opCtx);
// The latest read balancer settings and a mutex to protect its swaps
mutable stdx::mutex _balancerSettingsMutex;
diff --git a/src/mongo/s/catalog/dist_lock_catalog.h b/src/mongo/s/catalog/dist_lock_catalog.h
index 8bccfc6c8a7..5b2c392eb28 100644
--- a/src/mongo/s/catalog/dist_lock_catalog.h
+++ b/src/mongo/s/catalog/dist_lock_catalog.h
@@ -73,13 +73,13 @@ public:
* Returns the ping document of the specified processID.
* Common status errors include socket errors.
*/
- virtual StatusWith<LockpingsType> getPing(OperationContext* txn, StringData processID) = 0;
+ virtual StatusWith<LockpingsType> getPing(OperationContext* opCtx, StringData processID) = 0;
/**
* Updates the ping document. Creates a new entry if it does not exists.
* Common status errors include socket errors.
*/
- virtual Status ping(OperationContext* txn, StringData processID, Date_t ping) = 0;
+ virtual Status ping(OperationContext* opCtx, StringData processID, Date_t ping) = 0;
/**
* Attempts to update the owner of a lock identified by lockID to lockSessionID.
@@ -98,7 +98,7 @@ public:
* Common status errors include socket and duplicate key errors.
*/
virtual StatusWith<LocksType> grabLock(
- OperationContext* txn,
+ OperationContext* opCtx,
StringData lockID,
const OID& lockSessionID,
StringData who,
@@ -122,7 +122,7 @@ public:
*
* Common status errors include socket errors.
*/
- virtual StatusWith<LocksType> overtakeLock(OperationContext* txn,
+ virtual StatusWith<LocksType> overtakeLock(OperationContext* opCtx,
StringData lockID,
const OID& lockSessionID,
const OID& currentHolderTS,
@@ -137,46 +137,47 @@ public:
* specified session (i.e., it is not owned at all or if it is owned by a different session).
* Otherwise, it returns an error status. Common errors include socket errors.
*/
- virtual Status unlock(OperationContext* txn, const OID& lockSessionID) = 0;
+ virtual Status unlock(OperationContext* opCtx, const OID& lockSessionID) = 0;
/**
* Same as unlock() above except that it unlocks the lock document that matches "lockSessionID"
* AND "name", rather than just "lockSessionID". This is necessary if multiple documents have
* been locked with the same lockSessionID.
*/
- virtual Status unlock(OperationContext* txn, const OID& lockSessionID, StringData name) = 0;
+ virtual Status unlock(OperationContext* opCtx, const OID& lockSessionID, StringData name) = 0;
/**
* Unlocks all distributed locks with the given owning process ID. Does not provide any
* indication as to how many locks were actually unlocked. So long as the update command runs
* successfully, returns OK, otherwise returns an error status.
*/
- virtual Status unlockAll(OperationContext* txn, const std::string& processID) = 0;
+ virtual Status unlockAll(OperationContext* opCtx, const std::string& processID) = 0;
/**
* Get some information from the config server primary.
* Common status errors include socket errors.
*/
- virtual StatusWith<ServerInfo> getServerInfo(OperationContext* txn) = 0;
+ virtual StatusWith<ServerInfo> getServerInfo(OperationContext* opCtx) = 0;
/**
* Returns the lock document.
* Returns LockNotFound if lock document doesn't exist.
* Common status errors include socket errors.
*/
- virtual StatusWith<LocksType> getLockByTS(OperationContext* txn, const OID& lockSessionID) = 0;
+ virtual StatusWith<LocksType> getLockByTS(OperationContext* opCtx,
+ const OID& lockSessionID) = 0;
/**
* Returns the lock document.
* Common status errors include socket errors.
*/
- virtual StatusWith<LocksType> getLockByName(OperationContext* txn, StringData name) = 0;
+ virtual StatusWith<LocksType> getLockByName(OperationContext* opCtx, StringData name) = 0;
/**
* Attempts to delete the ping document corresponding to the given processId.
* Common status errors include socket errors.
*/
- virtual Status stopPing(OperationContext* txn, StringData processId) = 0;
+ virtual Status stopPing(OperationContext* opCtx, StringData processId) = 0;
protected:
DistLockCatalog();
diff --git a/src/mongo/s/catalog/dist_lock_catalog_impl.cpp b/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
index 754db72ae6f..ee094b0baf9 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
+++ b/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
@@ -164,10 +164,10 @@ DistLockCatalogImpl::DistLockCatalogImpl(ShardRegistry* shardRegistry)
DistLockCatalogImpl::~DistLockCatalogImpl() = default;
-StatusWith<LockpingsType> DistLockCatalogImpl::getPing(OperationContext* txn,
+StatusWith<LockpingsType> DistLockCatalogImpl::getPing(OperationContext* opCtx,
StringData processID) {
auto findResult = _findOnConfig(
- txn, kReadPref, _lockPingNS, BSON(LockpingsType::process() << processID), BSONObj(), 1);
+ opCtx, kReadPref, _lockPingNS, BSON(LockpingsType::process() << processID), BSONObj(), 1);
if (!findResult.isOK()) {
return findResult.getStatus();
@@ -191,7 +191,7 @@ StatusWith<LockpingsType> DistLockCatalogImpl::getPing(OperationContext* txn,
return pingDocResult.getValue();
}
-Status DistLockCatalogImpl::ping(OperationContext* txn, StringData processID, Date_t ping) {
+Status DistLockCatalogImpl::ping(OperationContext* opCtx, StringData processID, Date_t ping) {
auto request =
FindAndModifyRequest::makeUpdate(_lockPingNS,
BSON(LockpingsType::process() << processID),
@@ -200,7 +200,7 @@ Status DistLockCatalogImpl::ping(OperationContext* txn, StringData processID, Da
request.setWriteConcern(kMajorityWriteConcern);
auto resultStatus = _client->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
_locksNS.db().toString(),
request.toBSON(),
@@ -211,7 +211,7 @@ Status DistLockCatalogImpl::ping(OperationContext* txn, StringData processID, Da
return findAndModifyStatus.getStatus();
}
-StatusWith<LocksType> DistLockCatalogImpl::grabLock(OperationContext* txn,
+StatusWith<LocksType> DistLockCatalogImpl::grabLock(OperationContext* opCtx,
StringData lockID,
const OID& lockSessionID,
StringData who,
@@ -237,7 +237,7 @@ StatusWith<LocksType> DistLockCatalogImpl::grabLock(OperationContext* txn,
request.setWriteConcern(writeConcern);
auto resultStatus = _client->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
_locksNS.db().toString(),
request.toBSON(),
@@ -266,7 +266,7 @@ StatusWith<LocksType> DistLockCatalogImpl::grabLock(OperationContext* txn,
return locksTypeResult.getValue();
}
-StatusWith<LocksType> DistLockCatalogImpl::overtakeLock(OperationContext* txn,
+StatusWith<LocksType> DistLockCatalogImpl::overtakeLock(OperationContext* opCtx,
StringData lockID,
const OID& lockSessionID,
const OID& currentHolderTS,
@@ -294,7 +294,7 @@ StatusWith<LocksType> DistLockCatalogImpl::overtakeLock(OperationContext* txn,
request.setWriteConcern(kMajorityWriteConcern);
auto resultStatus = _client->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
_locksNS.db().toString(),
request.toBSON(),
@@ -317,16 +317,16 @@ StatusWith<LocksType> DistLockCatalogImpl::overtakeLock(OperationContext* txn,
return locksTypeResult.getValue();
}
-Status DistLockCatalogImpl::unlock(OperationContext* txn, const OID& lockSessionID) {
+Status DistLockCatalogImpl::unlock(OperationContext* opCtx, const OID& lockSessionID) {
FindAndModifyRequest request = FindAndModifyRequest::makeUpdate(
_locksNS,
BSON(LocksType::lockID(lockSessionID)),
BSON("$set" << BSON(LocksType::state(LocksType::UNLOCKED))));
request.setWriteConcern(kMajorityWriteConcern);
- return _unlock(txn, request);
+ return _unlock(opCtx, request);
}
-Status DistLockCatalogImpl::unlock(OperationContext* txn,
+Status DistLockCatalogImpl::unlock(OperationContext* opCtx,
const OID& lockSessionID,
StringData name) {
FindAndModifyRequest request = FindAndModifyRequest::makeUpdate(
@@ -334,12 +334,12 @@ Status DistLockCatalogImpl::unlock(OperationContext* txn,
BSON(LocksType::lockID(lockSessionID) << LocksType::name(name.toString())),
BSON("$set" << BSON(LocksType::state(LocksType::UNLOCKED))));
request.setWriteConcern(kMajorityWriteConcern);
- return _unlock(txn, request);
+ return _unlock(opCtx, request);
}
-Status DistLockCatalogImpl::_unlock(OperationContext* txn, const FindAndModifyRequest& request) {
+Status DistLockCatalogImpl::_unlock(OperationContext* opCtx, const FindAndModifyRequest& request) {
auto resultStatus = _client->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
_locksNS.db().toString(),
request.toBSON(),
@@ -357,7 +357,7 @@ Status DistLockCatalogImpl::_unlock(OperationContext* txn, const FindAndModifyRe
return findAndModifyStatus.getStatus();
}
-Status DistLockCatalogImpl::unlockAll(OperationContext* txn, const std::string& processID) {
+Status DistLockCatalogImpl::unlockAll(OperationContext* opCtx, const std::string& processID) {
std::unique_ptr<BatchedUpdateDocument> updateDoc(new BatchedUpdateDocument());
updateDoc->setQuery(BSON(LocksType::process(processID)));
updateDoc->setUpdateExpr(BSON("$set" << BSON(LocksType::state(LocksType::UNLOCKED))));
@@ -374,7 +374,7 @@ Status DistLockCatalogImpl::unlockAll(OperationContext* txn, const std::string&
BSONObj cmdObj = request.toBSON();
auto response = _client->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
_locksNS.db().toString(),
cmdObj,
@@ -403,9 +403,10 @@ Status DistLockCatalogImpl::unlockAll(OperationContext* txn, const std::string&
return batchResponse.toStatus();
}
-StatusWith<DistLockCatalog::ServerInfo> DistLockCatalogImpl::getServerInfo(OperationContext* txn) {
+StatusWith<DistLockCatalog::ServerInfo> DistLockCatalogImpl::getServerInfo(
+ OperationContext* opCtx) {
auto resultStatus = _client->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
kReadPref,
"admin",
BSON("serverStatus" << 1),
@@ -438,10 +439,10 @@ StatusWith<DistLockCatalog::ServerInfo> DistLockCatalogImpl::getServerInfo(Opera
return DistLockCatalog::ServerInfo(localTimeElem.date(), electionIdStatus.getValue());
}
-StatusWith<LocksType> DistLockCatalogImpl::getLockByTS(OperationContext* txn,
+StatusWith<LocksType> DistLockCatalogImpl::getLockByTS(OperationContext* opCtx,
const OID& lockSessionID) {
auto findResult = _findOnConfig(
- txn, kReadPref, _locksNS, BSON(LocksType::lockID(lockSessionID)), BSONObj(), 1);
+ opCtx, kReadPref, _locksNS, BSON(LocksType::lockID(lockSessionID)), BSONObj(), 1);
if (!findResult.isOK()) {
return findResult.getStatus();
@@ -465,9 +466,9 @@ StatusWith<LocksType> DistLockCatalogImpl::getLockByTS(OperationContext* txn,
return locksTypeResult.getValue();
}
-StatusWith<LocksType> DistLockCatalogImpl::getLockByName(OperationContext* txn, StringData name) {
+StatusWith<LocksType> DistLockCatalogImpl::getLockByName(OperationContext* opCtx, StringData name) {
auto findResult =
- _findOnConfig(txn, kReadPref, _locksNS, BSON(LocksType::name() << name), BSONObj(), 1);
+ _findOnConfig(opCtx, kReadPref, _locksNS, BSON(LocksType::name() << name), BSONObj(), 1);
if (!findResult.isOK()) {
return findResult.getStatus();
@@ -491,13 +492,13 @@ StatusWith<LocksType> DistLockCatalogImpl::getLockByName(OperationContext* txn,
return locksTypeResult.getValue();
}
-Status DistLockCatalogImpl::stopPing(OperationContext* txn, StringData processId) {
+Status DistLockCatalogImpl::stopPing(OperationContext* opCtx, StringData processId) {
auto request =
FindAndModifyRequest::makeRemove(_lockPingNS, BSON(LockpingsType::process() << processId));
request.setWriteConcern(kMajorityWriteConcern);
auto resultStatus = _client->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
_locksNS.db().toString(),
request.toBSON(),
@@ -509,14 +510,14 @@ Status DistLockCatalogImpl::stopPing(OperationContext* txn, StringData processId
}
StatusWith<vector<BSONObj>> DistLockCatalogImpl::_findOnConfig(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const NamespaceString& nss,
const BSONObj& query,
const BSONObj& sort,
boost::optional<long long> limit) {
auto result = _client->getConfigShard()->exhaustiveFindOnConfig(
- txn, readPref, repl::ReadConcernLevel::kMajorityReadConcern, nss, query, sort, limit);
+ opCtx, readPref, repl::ReadConcernLevel::kMajorityReadConcern, nss, query, sort, limit);
if (!result.isOK()) {
return result.getStatus();
}
diff --git a/src/mongo/s/catalog/dist_lock_catalog_impl.h b/src/mongo/s/catalog/dist_lock_catalog_impl.h
index 6f26a3a1235..ab1c88bd250 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_impl.h
+++ b/src/mongo/s/catalog/dist_lock_catalog_impl.h
@@ -52,11 +52,12 @@ public:
virtual ~DistLockCatalogImpl();
- virtual StatusWith<LockpingsType> getPing(OperationContext* txn, StringData processID) override;
+ virtual StatusWith<LockpingsType> getPing(OperationContext* opCtx,
+ StringData processID) override;
- virtual Status ping(OperationContext* txn, StringData processID, Date_t ping) override;
+ virtual Status ping(OperationContext* opCtx, StringData processID, Date_t ping) override;
- virtual StatusWith<LocksType> grabLock(OperationContext* txn,
+ virtual StatusWith<LocksType> grabLock(OperationContext* opCtx,
StringData lockID,
const OID& lockSessionID,
StringData who,
@@ -65,7 +66,7 @@ public:
StringData why,
const WriteConcernOptions& writeConcern) override;
- virtual StatusWith<LocksType> overtakeLock(OperationContext* txn,
+ virtual StatusWith<LocksType> overtakeLock(OperationContext* opCtx,
StringData lockID,
const OID& lockSessionID,
const OID& currentHolderTS,
@@ -74,27 +75,27 @@ public:
Date_t time,
StringData why) override;
- virtual Status unlock(OperationContext* txn, const OID& lockSessionID) override;
+ virtual Status unlock(OperationContext* opCtx, const OID& lockSessionID) override;
- virtual Status unlock(OperationContext* txn,
+ virtual Status unlock(OperationContext* opCtx,
const OID& lockSessionID,
StringData name) override;
- virtual Status unlockAll(OperationContext* txn, const std::string& processID) override;
+ virtual Status unlockAll(OperationContext* opCtx, const std::string& processID) override;
- virtual StatusWith<ServerInfo> getServerInfo(OperationContext* txn) override;
+ virtual StatusWith<ServerInfo> getServerInfo(OperationContext* opCtx) override;
- virtual StatusWith<LocksType> getLockByTS(OperationContext* txn,
+ virtual StatusWith<LocksType> getLockByTS(OperationContext* opCtx,
const OID& lockSessionID) override;
- virtual StatusWith<LocksType> getLockByName(OperationContext* txn, StringData name) override;
+ virtual StatusWith<LocksType> getLockByName(OperationContext* opCtx, StringData name) override;
- virtual Status stopPing(OperationContext* txn, StringData processId) override;
+ virtual Status stopPing(OperationContext* opCtx, StringData processId) override;
private:
- Status _unlock(OperationContext* txn, const FindAndModifyRequest& request);
+ Status _unlock(OperationContext* opCtx, const FindAndModifyRequest& request);
- StatusWith<std::vector<BSONObj>> _findOnConfig(OperationContext* txn,
+ StatusWith<std::vector<BSONObj>> _findOnConfig(OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const NamespaceString& nss,
const BSONObj& query,
diff --git a/src/mongo/s/catalog/dist_lock_catalog_mock.cpp b/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
index c3b27185eef..2a31d602e7a 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
+++ b/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
@@ -144,7 +144,7 @@ DistLockCatalogMock::DistLockCatalogMock()
DistLockCatalogMock::~DistLockCatalogMock() {}
-StatusWith<LockpingsType> DistLockCatalogMock::getPing(OperationContext* txn,
+StatusWith<LockpingsType> DistLockCatalogMock::getPing(OperationContext* opCtx,
StringData processID) {
auto ret = kLockpingsTypeBadRetValue;
GetPingFunc checkerFunc = noGetPingSet;
@@ -159,7 +159,7 @@ StatusWith<LockpingsType> DistLockCatalogMock::getPing(OperationContext* txn,
return ret;
}
-Status DistLockCatalogMock::ping(OperationContext* txn, StringData processID, Date_t ping) {
+Status DistLockCatalogMock::ping(OperationContext* opCtx, StringData processID, Date_t ping) {
auto ret = kBadRetValue;
PingFunc checkerFunc = noPingFuncSet;
@@ -173,7 +173,7 @@ Status DistLockCatalogMock::ping(OperationContext* txn, StringData processID, Da
return ret;
}
-StatusWith<LocksType> DistLockCatalogMock::grabLock(OperationContext* txn,
+StatusWith<LocksType> DistLockCatalogMock::grabLock(OperationContext* opCtx,
StringData lockID,
const OID& lockSessionID,
StringData who,
@@ -194,7 +194,7 @@ StatusWith<LocksType> DistLockCatalogMock::grabLock(OperationContext* txn,
return ret;
}
-StatusWith<LocksType> DistLockCatalogMock::overtakeLock(OperationContext* txn,
+StatusWith<LocksType> DistLockCatalogMock::overtakeLock(OperationContext* opCtx,
StringData lockID,
const OID& lockSessionID,
const OID& currentHolderTS,
@@ -215,7 +215,7 @@ StatusWith<LocksType> DistLockCatalogMock::overtakeLock(OperationContext* txn,
return ret;
}
-Status DistLockCatalogMock::unlock(OperationContext* txn, const OID& lockSessionID) {
+Status DistLockCatalogMock::unlock(OperationContext* opCtx, const OID& lockSessionID) {
auto ret = kBadRetValue;
UnlockFunc checkerFunc = noUnLockFuncSet;
@@ -229,7 +229,7 @@ Status DistLockCatalogMock::unlock(OperationContext* txn, const OID& lockSession
return ret;
}
-Status DistLockCatalogMock::unlock(OperationContext* txn,
+Status DistLockCatalogMock::unlock(OperationContext* opCtx,
const OID& lockSessionID,
StringData name) {
auto ret = kBadRetValue;
@@ -246,7 +246,8 @@ Status DistLockCatalogMock::unlock(OperationContext* txn,
return ret;
}
-StatusWith<DistLockCatalog::ServerInfo> DistLockCatalogMock::getServerInfo(OperationContext* txn) {
+StatusWith<DistLockCatalog::ServerInfo> DistLockCatalogMock::getServerInfo(
+ OperationContext* opCtx) {
auto ret = kServerInfoBadRetValue;
GetServerInfoFunc checkerFunc = noGetServerInfoSet;
@@ -260,7 +261,7 @@ StatusWith<DistLockCatalog::ServerInfo> DistLockCatalogMock::getServerInfo(Opera
return ret;
}
-StatusWith<LocksType> DistLockCatalogMock::getLockByTS(OperationContext* txn,
+StatusWith<LocksType> DistLockCatalogMock::getLockByTS(OperationContext* opCtx,
const OID& lockSessionID) {
auto ret = kLocksTypeBadRetValue;
GetLockByTSFunc checkerFunc = noGetLockByTSSet;
@@ -275,7 +276,7 @@ StatusWith<LocksType> DistLockCatalogMock::getLockByTS(OperationContext* txn,
return ret;
}
-StatusWith<LocksType> DistLockCatalogMock::getLockByName(OperationContext* txn, StringData name) {
+StatusWith<LocksType> DistLockCatalogMock::getLockByName(OperationContext* opCtx, StringData name) {
auto ret = kLocksTypeBadRetValue;
GetLockByNameFunc checkerFunc = noGetLockByNameSet;
@@ -289,7 +290,7 @@ StatusWith<LocksType> DistLockCatalogMock::getLockByName(OperationContext* txn,
return ret;
}
-Status DistLockCatalogMock::stopPing(OperationContext* txn, StringData processId) {
+Status DistLockCatalogMock::stopPing(OperationContext* opCtx, StringData processId) {
auto ret = kBadRetValue;
StopPingFunc checkerFunc = noStopPingFuncSet;
@@ -370,7 +371,7 @@ void DistLockCatalogMock::expectGetServerInfo(GetServerInfoFunc checkerFunc,
_getServerInfoReturnValue = returnThis;
}
-Status DistLockCatalogMock::unlockAll(OperationContext* txn, const std::string& processID) {
+Status DistLockCatalogMock::unlockAll(OperationContext* opCtx, const std::string& processID) {
return Status(ErrorCodes::IllegalOperation,
str::stream() << "unlockAll not expected to be called; processID: " << processID);
}
diff --git a/src/mongo/s/catalog/dist_lock_catalog_mock.h b/src/mongo/s/catalog/dist_lock_catalog_mock.h
index e29b5aa5fc7..7ea0eaa91f2 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_mock.h
+++ b/src/mongo/s/catalog/dist_lock_catalog_mock.h
@@ -89,11 +89,12 @@ public:
using GetLockByNameFunc = stdx::function<void(StringData name)>;
using GetServerInfoFunc = stdx::function<void()>;
- virtual StatusWith<LockpingsType> getPing(OperationContext* txn, StringData processID) override;
+ virtual StatusWith<LockpingsType> getPing(OperationContext* opCtx,
+ StringData processID) override;
- virtual Status ping(OperationContext* txn, StringData processID, Date_t ping) override;
+ virtual Status ping(OperationContext* opCtx, StringData processID, Date_t ping) override;
- virtual StatusWith<LocksType> grabLock(OperationContext* txn,
+ virtual StatusWith<LocksType> grabLock(OperationContext* opCtx,
StringData lockID,
const OID& lockSessionID,
StringData who,
@@ -102,7 +103,7 @@ public:
StringData why,
const WriteConcernOptions& writeConcern) override;
- virtual StatusWith<LocksType> overtakeLock(OperationContext* txn,
+ virtual StatusWith<LocksType> overtakeLock(OperationContext* opCtx,
StringData lockID,
const OID& lockSessionID,
const OID& currentHolderTS,
@@ -111,22 +112,22 @@ public:
Date_t time,
StringData why) override;
- virtual Status unlock(OperationContext* txn, const OID& lockSessionID) override;
+ virtual Status unlock(OperationContext* opCtx, const OID& lockSessionID) override;
- virtual Status unlock(OperationContext* txn,
+ virtual Status unlock(OperationContext* opCtx,
const OID& lockSessionID,
StringData name) override;
- virtual Status unlockAll(OperationContext* txn, const std::string& processID) override;
+ virtual Status unlockAll(OperationContext* opCtx, const std::string& processID) override;
- virtual StatusWith<ServerInfo> getServerInfo(OperationContext* txn) override;
+ virtual StatusWith<ServerInfo> getServerInfo(OperationContext* opCtx) override;
- virtual StatusWith<LocksType> getLockByTS(OperationContext* txn,
+ virtual StatusWith<LocksType> getLockByTS(OperationContext* opCtx,
const OID& lockSessionID) override;
- virtual StatusWith<LocksType> getLockByName(OperationContext* txn, StringData name) override;
+ virtual StatusWith<LocksType> getLockByName(OperationContext* opCtx, StringData name) override;
- virtual Status stopPing(OperationContext* txn, StringData processId) override;
+ virtual Status stopPing(OperationContext* opCtx, StringData processId) override;
/**
* Sets the checker method to use and the return value for grabLock to return every
diff --git a/src/mongo/s/catalog/dist_lock_manager.cpp b/src/mongo/s/catalog/dist_lock_manager.cpp
index 426d62cb7e9..9e6326ecf91 100644
--- a/src/mongo/s/catalog/dist_lock_manager.cpp
+++ b/src/mongo/s/catalog/dist_lock_manager.cpp
@@ -39,19 +39,19 @@ namespace mongo {
const Seconds DistLockManager::kDefaultLockTimeout(20);
const Milliseconds DistLockManager::kSingleLockAttemptTimeout(0);
-DistLockManager::ScopedDistLock::ScopedDistLock(OperationContext* txn,
+DistLockManager::ScopedDistLock::ScopedDistLock(OperationContext* opCtx,
DistLockHandle lockHandle,
DistLockManager* lockManager)
- : _txn(txn), _lockID(std::move(lockHandle)), _lockManager(lockManager) {}
+ : _opCtx(opCtx), _lockID(std::move(lockHandle)), _lockManager(lockManager) {}
DistLockManager::ScopedDistLock::~ScopedDistLock() {
if (_lockManager) {
- _lockManager->unlock(_txn, _lockID);
+ _lockManager->unlock(_opCtx, _lockID);
}
}
DistLockManager::ScopedDistLock::ScopedDistLock(ScopedDistLock&& other)
- : _txn(nullptr), _lockManager(nullptr) {
+ : _opCtx(nullptr), _lockManager(nullptr) {
*this = std::move(other);
}
@@ -59,9 +59,9 @@ DistLockManager::ScopedDistLock& DistLockManager::ScopedDistLock::operator=(
ScopedDistLock&& other) {
if (this != &other) {
invariant(_lockManager == nullptr);
- invariant(_txn == nullptr);
+ invariant(_opCtx == nullptr);
- _txn = other._txn;
+ _opCtx = other._opCtx;
_lockID = std::move(other._lockID);
_lockManager = other._lockManager;
other._lockManager = nullptr;
@@ -70,16 +70,16 @@ DistLockManager::ScopedDistLock& DistLockManager::ScopedDistLock::operator=(
return *this;
}
-StatusWith<DistLockManager::ScopedDistLock> DistLockManager::lock(OperationContext* txn,
+StatusWith<DistLockManager::ScopedDistLock> DistLockManager::lock(OperationContext* opCtx,
StringData name,
StringData whyMessage,
Milliseconds waitFor) {
- auto distLockHandleStatus = lockWithSessionID(txn, name, whyMessage, OID::gen(), waitFor);
+ auto distLockHandleStatus = lockWithSessionID(opCtx, name, whyMessage, OID::gen(), waitFor);
if (!distLockHandleStatus.isOK()) {
return distLockHandleStatus.getStatus();
}
- return DistLockManager::ScopedDistLock(txn, std::move(distLockHandleStatus.getValue()), this);
+ return DistLockManager::ScopedDistLock(opCtx, std::move(distLockHandleStatus.getValue()), this);
}
Status DistLockManager::ScopedDistLock::checkStatus() {
@@ -87,7 +87,7 @@ Status DistLockManager::ScopedDistLock::checkStatus() {
return Status(ErrorCodes::IllegalOperation, "no lock manager, lock was not acquired");
}
- return _lockManager->checkStatus(_txn, _lockID);
+ return _lockManager->checkStatus(_opCtx, _lockID);
}
} // namespace mongo
diff --git a/src/mongo/s/catalog/dist_lock_manager.h b/src/mongo/s/catalog/dist_lock_manager.h
index 0512a5dc481..18bb6bce786 100644
--- a/src/mongo/s/catalog/dist_lock_manager.h
+++ b/src/mongo/s/catalog/dist_lock_manager.h
@@ -76,7 +76,7 @@ public:
MONGO_DISALLOW_COPYING(ScopedDistLock);
public:
- ScopedDistLock(OperationContext* txn,
+ ScopedDistLock(OperationContext* opCtx,
DistLockHandle lockHandle,
DistLockManager* lockManager);
~ScopedDistLock();
@@ -90,7 +90,7 @@ public:
Status checkStatus();
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
DistLockHandle _lockID;
DistLockManager* _lockManager; // Not owned here.
};
@@ -107,7 +107,7 @@ public:
* Cleanup the manager's resources. Implementations do not need to guarantee thread safety
* so callers should employ proper synchronization when calling this method.
*/
- virtual void shutDown(OperationContext* txn) = 0;
+ virtual void shutDown(OperationContext* opCtx) = 0;
/**
* Returns the process ID for this DistLockManager.
@@ -127,7 +127,7 @@ public:
* Returns ErrorCodes::DistributedClockSkewed when a clock skew is detected.
* Returns ErrorCodes::LockBusy if the lock is being held.
*/
- StatusWith<ScopedDistLock> lock(OperationContext* txn,
+ StatusWith<ScopedDistLock> lock(OperationContext* opCtx,
StringData name,
StringData whyMessage,
Milliseconds waitFor);
@@ -140,7 +140,7 @@ public:
* immediately reacquired if "lockSessionID" matches that of the lock, rather than waiting for
* the inactive lock to expire.
*/
- virtual StatusWith<DistLockHandle> lockWithSessionID(OperationContext* txn,
+ virtual StatusWith<DistLockHandle> lockWithSessionID(OperationContext* opCtx,
StringData name,
StringData whyMessage,
const OID& lockSessionID,
@@ -151,7 +151,7 @@ public:
* anyone. Uses local write concern and does not attempt to overtake the lock or check whether
* the lock lease has expired.
*/
- virtual StatusWith<DistLockHandle> tryLockWithLocalWriteConcern(OperationContext* txn,
+ virtual StatusWith<DistLockHandle> tryLockWithLocalWriteConcern(OperationContext* opCtx,
StringData name,
StringData whyMessage,
const OID& lockSessionID) = 0;
@@ -160,26 +160,26 @@ public:
* Unlocks the given lockHandle. Will attempt to retry again later if the config
* server is not reachable.
*/
- virtual void unlock(OperationContext* txn, const DistLockHandle& lockHandle) = 0;
+ virtual void unlock(OperationContext* opCtx, const DistLockHandle& lockHandle) = 0;
/**
* Unlocks the lock specified by "lockHandle" and "name". Will attempt to retry again later if
* the config server is not reachable.
*/
- virtual void unlock(OperationContext* txn,
+ virtual void unlock(OperationContext* opCtx,
const DistLockHandle& lockHandle,
StringData name) = 0;
/**
* Makes a best-effort attempt to unlock all locks owned by the given processID.
*/
- virtual void unlockAll(OperationContext* txn, const std::string& processID) = 0;
+ virtual void unlockAll(OperationContext* opCtx, const std::string& processID) = 0;
protected:
/**
* Checks if the lockHandle still exists in the config server.
*/
- virtual Status checkStatus(OperationContext* txn, const DistLockHandle& lockHandle) = 0;
+ virtual Status checkStatus(OperationContext* opCtx, const DistLockHandle& lockHandle) = 0;
};
} // namespace mongo
diff --git a/src/mongo/s/catalog/dist_lock_manager_mock.cpp b/src/mongo/s/catalog/dist_lock_manager_mock.cpp
index 18bd8a8ba6c..710eca59cf6 100644
--- a/src/mongo/s/catalog/dist_lock_manager_mock.cpp
+++ b/src/mongo/s/catalog/dist_lock_manager_mock.cpp
@@ -59,7 +59,7 @@ DistLockManagerMock::DistLockManagerMock(std::unique_ptr<DistLockCatalog> catalo
void DistLockManagerMock::startUp() {}
-void DistLockManagerMock::shutDown(OperationContext* txn) {
+void DistLockManagerMock::shutDown(OperationContext* opCtx) {
uassert(28659, "DistLockManagerMock shut down with outstanding locks present", _locks.empty());
}
@@ -67,7 +67,7 @@ std::string DistLockManagerMock::getProcessID() {
return "Mock dist lock manager process id";
}
-StatusWith<DistLockHandle> DistLockManagerMock::lockWithSessionID(OperationContext* txn,
+StatusWith<DistLockHandle> DistLockManagerMock::lockWithSessionID(OperationContext* opCtx,
StringData name,
StringData whyMessage,
const OID& lockSessionID,
@@ -95,17 +95,17 @@ StatusWith<DistLockHandle> DistLockManagerMock::lockWithSessionID(OperationConte
}
StatusWith<DistLockHandle> DistLockManagerMock::tryLockWithLocalWriteConcern(
- OperationContext* txn, StringData name, StringData whyMessage, const OID& lockSessionID) {
+ OperationContext* opCtx, StringData name, StringData whyMessage, const OID& lockSessionID) {
// Not yet implemented
MONGO_UNREACHABLE;
}
-void DistLockManagerMock::unlockAll(OperationContext* txn, const std::string& processID) {
+void DistLockManagerMock::unlockAll(OperationContext* opCtx, const std::string& processID) {
// Not yet implemented
MONGO_UNREACHABLE;
}
-void DistLockManagerMock::unlock(OperationContext* txn, const DistLockHandle& lockHandle) {
+void DistLockManagerMock::unlock(OperationContext* opCtx, const DistLockHandle& lockHandle) {
std::vector<LockInfo>::iterator it =
std::find_if(_locks.begin(), _locks.end(), [&lockHandle](LockInfo info) -> bool {
return info.lockID == lockHandle;
@@ -116,7 +116,7 @@ void DistLockManagerMock::unlock(OperationContext* txn, const DistLockHandle& lo
_locks.erase(it);
}
-void DistLockManagerMock::unlock(OperationContext* txn,
+void DistLockManagerMock::unlock(OperationContext* opCtx,
const DistLockHandle& lockHandle,
StringData name) {
std::vector<LockInfo>::iterator it =
@@ -129,7 +129,7 @@ void DistLockManagerMock::unlock(OperationContext* txn,
_locks.erase(it);
}
-Status DistLockManagerMock::checkStatus(OperationContext* txn, const DistLockHandle& lockHandle) {
+Status DistLockManagerMock::checkStatus(OperationContext* opCtx, const DistLockHandle& lockHandle) {
return Status::OK();
}
diff --git a/src/mongo/s/catalog/dist_lock_manager_mock.h b/src/mongo/s/catalog/dist_lock_manager_mock.h
index d137b0239e4..17d91d94b19 100644
--- a/src/mongo/s/catalog/dist_lock_manager_mock.h
+++ b/src/mongo/s/catalog/dist_lock_manager_mock.h
@@ -44,22 +44,22 @@ public:
virtual ~DistLockManagerMock() = default;
void startUp() override;
- void shutDown(OperationContext* txn) override;
+ void shutDown(OperationContext* opCtx) override;
std::string getProcessID() override;
- StatusWith<DistLockHandle> lockWithSessionID(OperationContext* txn,
+ StatusWith<DistLockHandle> lockWithSessionID(OperationContext* opCtx,
StringData name,
StringData whyMessage,
const OID& lockSessionID,
Milliseconds waitFor) override;
- StatusWith<DistLockHandle> tryLockWithLocalWriteConcern(OperationContext* txn,
+ StatusWith<DistLockHandle> tryLockWithLocalWriteConcern(OperationContext* opCtx,
StringData name,
StringData whyMessage,
const OID& lockSessionID) override;
- void unlockAll(OperationContext* txn, const std::string& processID) override;
+ void unlockAll(OperationContext* opCtx, const std::string& processID) override;
using LockFunc =
stdx::function<void(StringData name, StringData whyMessage, Milliseconds waitFor)>;
@@ -67,11 +67,13 @@ public:
void expectLock(LockFunc checkerFunc, Status lockStatus);
protected:
- void unlock(OperationContext* txn, const DistLockHandle& lockHandle) override;
+ void unlock(OperationContext* opCtx, const DistLockHandle& lockHandle) override;
- void unlock(OperationContext* txn, const DistLockHandle& lockHandle, StringData name) override;
+ void unlock(OperationContext* opCtx,
+ const DistLockHandle& lockHandle,
+ StringData name) override;
- Status checkStatus(OperationContext* txn, const DistLockHandle& lockHandle) override;
+ Status checkStatus(OperationContext* opCtx, const DistLockHandle& lockHandle) override;
private:
struct LockInfo {
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager.cpp b/src/mongo/s/catalog/replset_dist_lock_manager.cpp
index eff613f66b6..98fbcf94ceb 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager.cpp
+++ b/src/mongo/s/catalog/replset_dist_lock_manager.cpp
@@ -89,7 +89,7 @@ void ReplSetDistLockManager::startUp() {
}
}
-void ReplSetDistLockManager::shutDown(OperationContext* txn) {
+void ReplSetDistLockManager::shutDown(OperationContext* opCtx) {
{
stdx::lock_guard<stdx::mutex> lk(_mutex);
_isShutDown = true;
@@ -103,7 +103,7 @@ void ReplSetDistLockManager::shutDown(OperationContext* txn) {
_execThread.reset();
}
- auto status = _catalog->stopPing(txn, _processID);
+ auto status = _catalog->stopPing(opCtx, _processID);
if (!status.isOK()) {
warning() << "error encountered while cleaning up distributed ping entry for " << _processID
<< causedBy(redact(status));
@@ -128,8 +128,8 @@ void ReplSetDistLockManager::doTask() {
while (!isShutDown()) {
{
- auto txn = cc().makeOperationContext();
- auto pingStatus = _catalog->ping(txn.get(), _processID, Date_t::now());
+ auto opCtx = cc().makeOperationContext();
+ auto pingStatus = _catalog->ping(opCtx.get(), _processID, Date_t::now());
if (!pingStatus.isOK() && pingStatus != ErrorCodes::NotMaster) {
warning() << "pinging failed for distributed lock pinger" << causedBy(pingStatus);
@@ -154,10 +154,10 @@ void ReplSetDistLockManager::doTask() {
"status unlock not initialized!");
if (toUnlock.second) {
// A non-empty _id (name) field was provided, unlock by ts (sessionId) and _id.
- unlockStatus = _catalog->unlock(txn.get(), toUnlock.first, *toUnlock.second);
+ unlockStatus = _catalog->unlock(opCtx.get(), toUnlock.first, *toUnlock.second);
nameMessage = " and " + LocksType::name() + ": " + *toUnlock.second;
} else {
- unlockStatus = _catalog->unlock(txn.get(), toUnlock.first);
+ unlockStatus = _catalog->unlock(opCtx.get(), toUnlock.first);
}
if (!unlockStatus.isOK()) {
@@ -180,11 +180,11 @@ void ReplSetDistLockManager::doTask() {
}
}
-StatusWith<bool> ReplSetDistLockManager::isLockExpired(OperationContext* txn,
+StatusWith<bool> ReplSetDistLockManager::isLockExpired(OperationContext* opCtx,
LocksType lockDoc,
const Milliseconds& lockExpiration) {
const auto& processID = lockDoc.getProcess();
- auto pingStatus = _catalog->getPing(txn, processID);
+ auto pingStatus = _catalog->getPing(opCtx, processID);
Date_t pingValue;
if (pingStatus.isOK()) {
@@ -202,7 +202,7 @@ StatusWith<bool> ReplSetDistLockManager::isLockExpired(OperationContext* txn,
} // else use default pingValue if ping document does not exist.
Timer timer(_serviceContext->getTickSource());
- auto serverInfoStatus = _catalog->getServerInfo(txn);
+ auto serverInfoStatus = _catalog->getServerInfo(opCtx);
if (!serverInfoStatus.isOK()) {
if (serverInfoStatus.getStatus() == ErrorCodes::NotMaster) {
return false;
@@ -277,7 +277,7 @@ StatusWith<bool> ReplSetDistLockManager::isLockExpired(OperationContext* txn,
return false;
}
-StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationContext* txn,
+StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationContext* opCtx,
StringData name,
StringData whyMessage,
const OID& lockSessionID,
@@ -290,7 +290,7 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
// independent write operations.
int networkErrorRetries = 0;
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
// Distributed lock acquisition works by tring to update the state of the lock to 'taken'. If
// the lock is currently taken, we will back off and try the acquisition again, repeating this
@@ -312,7 +312,7 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
<< " with lockSessionID: " << lockSessionID << ", why: " << whyMessage.toString();
auto lockResult = _catalog->grabLock(
- txn, name, lockSessionID, who, _processID, Date_t::now(), whyMessage.toString());
+ opCtx, name, lockSessionID, who, _processID, Date_t::now(), whyMessage.toString());
auto status = lockResult.getStatus();
@@ -333,7 +333,7 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
networkErrorRetries++;
- status = _catalog->unlock(txn, lockSessionID, name);
+ status = _catalog->unlock(opCtx, lockSessionID, name);
if (status.isOK()) {
// We certainly do not own the lock, so we can retry
continue;
@@ -355,7 +355,7 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
}
// Get info from current lock and check if we can overtake it.
- auto getLockStatusResult = _catalog->getLockByName(txn, name);
+ auto getLockStatusResult = _catalog->getLockByName(opCtx, name);
const auto& getLockStatus = getLockStatusResult.getStatus();
if (!getLockStatusResult.isOK() && getLockStatus != ErrorCodes::LockNotFound) {
@@ -366,14 +366,14 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
// found, use the normal grab lock path to acquire it.
if (getLockStatusResult.isOK()) {
auto currentLock = getLockStatusResult.getValue();
- auto isLockExpiredResult = isLockExpired(txn, currentLock, lockExpiration);
+ auto isLockExpiredResult = isLockExpired(opCtx, currentLock, lockExpiration);
if (!isLockExpiredResult.isOK()) {
return isLockExpiredResult.getStatus();
}
if (isLockExpiredResult.getValue() || (lockSessionID == currentLock.getLockID())) {
- auto overtakeResult = _catalog->overtakeLock(txn,
+ auto overtakeResult = _catalog->overtakeLock(opCtx,
name,
lockSessionID,
currentLock.getLockID(),
@@ -429,7 +429,7 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
}
StatusWith<DistLockHandle> ReplSetDistLockManager::tryLockWithLocalWriteConcern(
- OperationContext* txn, StringData name, StringData whyMessage, const OID& lockSessionID) {
+ OperationContext* opCtx, StringData name, StringData whyMessage, const OID& lockSessionID) {
const string who = str::stream() << _processID << ":" << getThreadName();
LOG(1) << "trying to acquire new distributed lock for " << name
@@ -438,7 +438,7 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::tryLockWithLocalWriteConcern(
<< " ms, process : " << _processID << " )"
<< " with lockSessionID: " << lockSessionID << ", why: " << whyMessage.toString();
- auto lockStatus = _catalog->grabLock(txn,
+ auto lockStatus = _catalog->grabLock(opCtx,
name,
lockSessionID,
who,
@@ -462,8 +462,8 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::tryLockWithLocalWriteConcern(
return lockStatus.getStatus();
}
-void ReplSetDistLockManager::unlock(OperationContext* txn, const DistLockHandle& lockSessionID) {
- auto unlockStatus = _catalog->unlock(txn, lockSessionID);
+void ReplSetDistLockManager::unlock(OperationContext* opCtx, const DistLockHandle& lockSessionID) {
+ auto unlockStatus = _catalog->unlock(opCtx, lockSessionID);
if (!unlockStatus.isOK()) {
queueUnlock(lockSessionID, boost::none);
@@ -473,10 +473,10 @@ void ReplSetDistLockManager::unlock(OperationContext* txn, const DistLockHandle&
}
}
-void ReplSetDistLockManager::unlock(OperationContext* txn,
+void ReplSetDistLockManager::unlock(OperationContext* opCtx,
const DistLockHandle& lockSessionID,
StringData name) {
- auto unlockStatus = _catalog->unlock(txn, lockSessionID, name);
+ auto unlockStatus = _catalog->unlock(opCtx, lockSessionID, name);
if (!unlockStatus.isOK()) {
queueUnlock(lockSessionID, name.toString());
@@ -486,17 +486,17 @@ void ReplSetDistLockManager::unlock(OperationContext* txn,
}
}
-void ReplSetDistLockManager::unlockAll(OperationContext* txn, const std::string& processID) {
- Status status = _catalog->unlockAll(txn, processID);
+void ReplSetDistLockManager::unlockAll(OperationContext* opCtx, const std::string& processID) {
+ Status status = _catalog->unlockAll(opCtx, processID);
if (!status.isOK()) {
warning() << "Error while trying to unlock existing distributed locks"
<< causedBy(redact(status));
}
}
-Status ReplSetDistLockManager::checkStatus(OperationContext* txn,
+Status ReplSetDistLockManager::checkStatus(OperationContext* opCtx,
const DistLockHandle& lockHandle) {
- return _catalog->getLockByTS(txn, lockHandle).getStatus();
+ return _catalog->getLockByTS(opCtx, lockHandle).getStatus();
}
void ReplSetDistLockManager::queueUnlock(const DistLockHandle& lockSessionID,
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager.h b/src/mongo/s/catalog/replset_dist_lock_manager.h
index d6db97ebc7a..278e6e4a78c 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager.h
+++ b/src/mongo/s/catalog/replset_dist_lock_manager.h
@@ -64,31 +64,31 @@ public:
virtual ~ReplSetDistLockManager();
void startUp() override;
- void shutDown(OperationContext* txn) override;
+ void shutDown(OperationContext* opCtx) override;
std::string getProcessID() override;
- StatusWith<DistLockHandle> lockWithSessionID(OperationContext* txn,
+ StatusWith<DistLockHandle> lockWithSessionID(OperationContext* opCtx,
StringData name,
StringData whyMessage,
const OID& lockSessionID,
Milliseconds waitFor) override;
- StatusWith<DistLockHandle> tryLockWithLocalWriteConcern(OperationContext* txn,
+ StatusWith<DistLockHandle> tryLockWithLocalWriteConcern(OperationContext* opCtx,
StringData name,
StringData whyMessage,
const OID& lockSessionID) override;
- void unlock(OperationContext* txn, const DistLockHandle& lockSessionID) override;
+ void unlock(OperationContext* opCtx, const DistLockHandle& lockSessionID) override;
- void unlock(OperationContext* txn,
+ void unlock(OperationContext* opCtx,
const DistLockHandle& lockSessionID,
StringData name) override;
- void unlockAll(OperationContext* txn, const std::string& processID) override;
+ void unlockAll(OperationContext* opCtx, const std::string& processID) override;
protected:
- Status checkStatus(OperationContext* txn, const DistLockHandle& lockSessionID) override;
+ Status checkStatus(OperationContext* opCtx, const DistLockHandle& lockSessionID) override;
private:
/**
@@ -110,7 +110,7 @@ private:
* Returns true if the current process that owns the lock has no fresh pings since
* the lock expiration threshold.
*/
- StatusWith<bool> isLockExpired(OperationContext* txn,
+ StatusWith<bool> isLockExpired(OperationContext* opCtx,
const LocksType lockDoc,
const Milliseconds& lockExpiration);
diff --git a/src/mongo/s/catalog/sharding_catalog_client.h b/src/mongo/s/catalog/sharding_catalog_client.h
index f334c05a477..ee0b5a2384f 100644
--- a/src/mongo/s/catalog/sharding_catalog_client.h
+++ b/src/mongo/s/catalog/sharding_catalog_client.h
@@ -108,7 +108,7 @@ public:
/**
* Performs necessary cleanup when shutting down cleanly.
*/
- virtual void shutDown(OperationContext* txn) = 0;
+ virtual void shutDown(OperationContext* opCtx) = 0;
/**
* Creates a new database or updates the sharding status for an existing one. Cannot be
@@ -120,7 +120,7 @@ public:
* - DatabaseDifferCase - database already exists, but with a different case
* - ShardNotFound - could not find a shard to place the DB on
*/
- virtual Status enableSharding(OperationContext* txn, const std::string& dbName) = 0;
+ virtual Status enableSharding(OperationContext* opCtx, const std::string& dbName) = 0;
/**
* Shards a collection. Assumes that the database is enabled for sharding.
@@ -141,7 +141,7 @@ public:
* operations are writing to the same output collection, for instance.
*
*/
- virtual Status shardCollection(OperationContext* txn,
+ virtual Status shardCollection(OperationContext* opCtx,
const std::string& ns,
const ShardKeyPattern& fieldsAndOrder,
const BSONObj& defaultCollation,
@@ -157,13 +157,13 @@ public:
* Because of the asynchronous nature of the draining mechanism, this method returns
* the current draining status. See ShardDrainingStatus enum definition for more details.
*/
- virtual StatusWith<ShardDrainingStatus> removeShard(OperationContext* txn,
+ virtual StatusWith<ShardDrainingStatus> removeShard(OperationContext* opCtx,
const ShardId& name) = 0;
/**
* Updates or creates the metadata for a given database.
*/
- virtual Status updateDatabase(OperationContext* txn,
+ virtual Status updateDatabase(OperationContext* opCtx,
const std::string& dbName,
const DatabaseType& db) = 0;
@@ -177,13 +177,13 @@ public:
* the failure. These are some of the known failures:
* - NamespaceNotFound - database does not exist
*/
- virtual StatusWith<repl::OpTimeWith<DatabaseType>> getDatabase(OperationContext* txn,
+ virtual StatusWith<repl::OpTimeWith<DatabaseType>> getDatabase(OperationContext* opCtx,
const std::string& dbName) = 0;
/**
* Updates or creates the metadata for a given collection.
*/
- virtual Status updateCollection(OperationContext* txn,
+ virtual Status updateCollection(OperationContext* opCtx,
const std::string& collNs,
const CollectionType& coll) = 0;
@@ -198,7 +198,7 @@ public:
* - NamespaceNotFound - collection does not exist
*/
virtual StatusWith<repl::OpTimeWith<CollectionType>> getCollection(
- OperationContext* txn, const std::string& collNs) = 0;
+ OperationContext* opCtx, const std::string& collNs) = 0;
/**
* Retrieves all collections undera specified database (or in the system).
@@ -212,7 +212,7 @@ public:
*
* Returns a !OK status if an error occurs.
*/
- virtual Status getCollections(OperationContext* txn,
+ virtual Status getCollections(OperationContext* opCtx,
const std::string* dbName,
std::vector<CollectionType>* collections,
repl::OpTime* optime) = 0;
@@ -224,14 +224,14 @@ public:
* some of the known failures:
* - NamespaceNotFound - collection does not exist
*/
- virtual Status dropCollection(OperationContext* txn, const NamespaceString& ns) = 0;
+ virtual Status dropCollection(OperationContext* opCtx, const NamespaceString& ns) = 0;
/**
* Retrieves all databases for a shard.
*
* Returns a !OK status if an error occurs.
*/
- virtual Status getDatabasesForShard(OperationContext* txn,
+ virtual Status getDatabasesForShard(OperationContext* opCtx,
const ShardId& shardId,
std::vector<std::string>* dbs) = 0;
@@ -249,7 +249,7 @@ public:
*
* Returns a !OK status if an error occurs.
*/
- virtual Status getChunks(OperationContext* txn,
+ virtual Status getChunks(OperationContext* opCtx,
const BSONObj& filter,
const BSONObj& sort,
boost::optional<int> limit,
@@ -260,7 +260,7 @@ public:
/**
* Retrieves all tags for the specified collection.
*/
- virtual Status getTagsForCollection(OperationContext* txn,
+ virtual Status getTagsForCollection(OperationContext* opCtx,
const std::string& collectionNs,
std::vector<TagsType>* tags) = 0;
@@ -269,7 +269,7 @@ public:
* Returns a !OK status if an error occurs.
*/
virtual StatusWith<repl::OpTimeWith<std::vector<ShardType>>> getAllShards(
- OperationContext* txn, repl::ReadConcernLevel readConcern) = 0;
+ OperationContext* opCtx, repl::ReadConcernLevel readConcern) = 0;
/**
* Runs a user management command on the config servers, potentially synchronizing through
@@ -281,7 +281,7 @@ public:
* @param result: contains data returned from config servers
* Returns true on success.
*/
- virtual bool runUserManagementWriteCommand(OperationContext* txn,
+ virtual bool runUserManagementWriteCommand(OperationContext* opCtx,
const std::string& commandName,
const std::string& dbname,
const BSONObj& cmdObj,
@@ -290,7 +290,7 @@ public:
/**
* Runs a user management related read-only command on a config server.
*/
- virtual bool runUserManagementReadCommand(OperationContext* txn,
+ virtual bool runUserManagementReadCommand(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder* result) = 0;
@@ -313,7 +313,7 @@ public:
* failure because the precondition no longer matches. If a query of the chunks collection
* returns a document matching both 'nss' and 'lastChunkVersion,' the write succeeded.
*/
- virtual Status applyChunkOpsDeprecated(OperationContext* txn,
+ virtual Status applyChunkOpsDeprecated(OperationContext* opCtx,
const BSONArray& updateOps,
const BSONArray& preCondition,
const std::string& nss,
@@ -324,7 +324,7 @@ public:
/**
* Writes a diagnostic event to the action log.
*/
- virtual Status logAction(OperationContext* txn,
+ virtual Status logAction(OperationContext* opCtx,
const std::string& what,
const std::string& ns,
const BSONObj& detail) = 0;
@@ -332,7 +332,7 @@ public:
/**
* Writes a diagnostic event to the change log.
*/
- virtual Status logChange(OperationContext* txn,
+ virtual Status logChange(OperationContext* opCtx,
const std::string& what,
const std::string& ns,
const BSONObj& detail,
@@ -348,13 +348,13 @@ public:
* Returns ErrorCodes::NoMatchingDocument if no such key exists or the BSON content of the
* setting otherwise.
*/
- virtual StatusWith<BSONObj> getGlobalSettings(OperationContext* txn, StringData key) = 0;
+ virtual StatusWith<BSONObj> getGlobalSettings(OperationContext* opCtx, StringData key) = 0;
/**
* Returns the contents of the config.version document - containing the current cluster schema
* version as well as the clusterID.
*/
- virtual StatusWith<VersionType> getConfigVersion(OperationContext* txn,
+ virtual StatusWith<VersionType> getConfigVersion(OperationContext* opCtx,
repl::ReadConcernLevel readConcern) = 0;
/**
@@ -367,7 +367,7 @@ public:
* @param request Request to be sent to the config server.
* @param response Out parameter to receive the response. Can be nullptr.
*/
- virtual void writeConfigServerDirect(OperationContext* txn,
+ virtual void writeConfigServerDirect(OperationContext* opCtx,
const BatchedCommandRequest& request,
BatchedCommandResponse* response) = 0;
@@ -383,7 +383,7 @@ public:
* - DatabaseDifferCase - database already exists, but with a different case
* - ShardNotFound - could not find a shard to place the DB on
*/
- virtual Status createDatabase(OperationContext* txn, const std::string& dbName) = 0;
+ virtual Status createDatabase(OperationContext* opCtx, const std::string& dbName) = 0;
/**
* Directly inserts a document in the specified namespace on the config server. The document
@@ -391,7 +391,7 @@ public:
*
* NOTE: Should not be used in new code outside the ShardingCatalogManager.
*/
- virtual Status insertConfigDocument(OperationContext* txn,
+ virtual Status insertConfigDocument(OperationContext* opCtx,
const std::string& ns,
const BSONObj& doc,
const WriteConcernOptions& writeConcern) = 0;
@@ -410,7 +410,7 @@ public:
*
* NOTE: Should not be used in new code outside the ShardingCatalogManager.
*/
- virtual StatusWith<bool> updateConfigDocument(OperationContext* txn,
+ virtual StatusWith<bool> updateConfigDocument(OperationContext* opCtx,
const std::string& ns,
const BSONObj& query,
const BSONObj& update,
@@ -423,7 +423,7 @@ public:
*
* NOTE: Should not be used in new code outside the ShardingCatalogManager.
*/
- virtual Status removeConfigDocuments(OperationContext* txn,
+ virtual Status removeConfigDocuments(OperationContext* opCtx,
const std::string& ns,
const BSONObj& query,
const WriteConcernOptions& writeConcern) = 0;
@@ -433,7 +433,7 @@ public:
* format for listDatabases, based on the listDatabases command parameters in
* 'listDatabasesCmd'.
*/
- virtual Status appendInfoForConfigServerDatabases(OperationContext* txn,
+ virtual Status appendInfoForConfigServerDatabases(OperationContext* opCtx,
const BSONObj& listDatabasesCmd,
BSONArrayBuilder* builder) = 0;
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index 676372be4fd..f541dde581f 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -121,7 +121,7 @@ void toBatchError(const Status& status, BatchedCommandResponse* response) {
* Creates and writes to the config server the first chunks for a newly sharded collection. Returns
* the version generated for the collection.
*/
-StatusWith<ChunkVersion> createFirstChunks(OperationContext* txn,
+StatusWith<ChunkVersion> createFirstChunks(OperationContext* opCtx,
const NamespaceString& nss,
const ShardKeyPattern& shardKeyPattern,
const ShardId& primaryShardId,
@@ -136,10 +136,10 @@ StatusWith<ChunkVersion> createFirstChunks(OperationContext* txn,
if (initPoints.empty()) {
// If no split points were specified use the shard's data distribution to determine them
auto primaryShard =
- uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, primaryShardId));
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, primaryShardId));
auto result = uassertStatusOK(primaryShard->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
nss.db().toString(),
BSON("count" << nss.coll()),
@@ -151,12 +151,12 @@ StatusWith<ChunkVersion> createFirstChunks(OperationContext* txn,
if (numObjects > 0) {
splitPoints = uassertStatusOK(shardutil::selectChunkSplitPoints(
- txn,
+ opCtx,
primaryShardId,
nss,
shardKeyPattern,
ChunkRange(keyPattern.globalMin(), keyPattern.globalMax()),
- Grid::get(txn)->getBalancerConfiguration()->getMaxChunkSizeBytes(),
+ Grid::get(opCtx)->getBalancerConfiguration()->getMaxChunkSizeBytes(),
0));
}
@@ -207,8 +207,8 @@ StatusWith<ChunkVersion> createFirstChunks(OperationContext* txn,
chunk.setShard(shardIds[i % shardIds.size()]);
chunk.setVersion(version);
- Status status = Grid::get(txn)->catalogClient(txn)->insertConfigDocument(
- txn,
+ Status status = Grid::get(opCtx)->catalogClient(opCtx)->insertConfigDocument(
+ opCtx,
ChunkType::ConfigNS,
chunk.toConfigBSON(),
ShardingCatalogClient::kMajorityWriteConcern);
@@ -240,7 +240,7 @@ Status ShardingCatalogClientImpl::startup() {
return Status::OK();
}
-void ShardingCatalogClientImpl::shutDown(OperationContext* txn) {
+void ShardingCatalogClientImpl::shutDown(OperationContext* opCtx) {
LOG(1) << "ShardingCatalogClientImpl::shutDown() called.";
{
stdx::lock_guard<stdx::mutex> lk(_mutex);
@@ -248,15 +248,15 @@ void ShardingCatalogClientImpl::shutDown(OperationContext* txn) {
}
invariant(_distLockManager);
- _distLockManager->shutDown(txn);
+ _distLockManager->shutDown(opCtx);
}
-Status ShardingCatalogClientImpl::updateCollection(OperationContext* txn,
+Status ShardingCatalogClientImpl::updateCollection(OperationContext* opCtx,
const std::string& collNs,
const CollectionType& coll) {
fassert(28634, coll.validate());
- auto status = updateConfigDocument(txn,
+ auto status = updateConfigDocument(opCtx,
CollectionType::ConfigNS,
BSON(CollectionType::fullNs(collNs)),
coll.toBSON(),
@@ -271,12 +271,12 @@ Status ShardingCatalogClientImpl::updateCollection(OperationContext* txn,
return Status::OK();
}
-Status ShardingCatalogClientImpl::updateDatabase(OperationContext* txn,
+Status ShardingCatalogClientImpl::updateDatabase(OperationContext* opCtx,
const std::string& dbName,
const DatabaseType& db) {
fassert(28616, db.validate());
- auto status = updateConfigDocument(txn,
+ auto status = updateConfigDocument(opCtx,
DatabaseType::ConfigNS,
BSON(DatabaseType::name(dbName)),
db.toBSON(),
@@ -291,7 +291,8 @@ Status ShardingCatalogClientImpl::updateDatabase(OperationContext* txn,
return Status::OK();
}
-Status ShardingCatalogClientImpl::createDatabase(OperationContext* txn, const std::string& dbName) {
+Status ShardingCatalogClientImpl::createDatabase(OperationContext* opCtx,
+ const std::string& dbName) {
invariant(nsIsDbOnly(dbName));
// The admin and config databases should never be explicitly created. They "just exist",
@@ -301,19 +302,19 @@ Status ShardingCatalogClientImpl::createDatabase(OperationContext* txn, const st
// Lock the database globally to prevent conflicts with simultaneous database creation.
auto scopedDistLock = getDistLockManager()->lock(
- txn, dbName, "createDatabase", DistLockManager::kDefaultLockTimeout);
+ opCtx, dbName, "createDatabase", DistLockManager::kDefaultLockTimeout);
if (!scopedDistLock.isOK()) {
return scopedDistLock.getStatus();
}
// check for case sensitivity violations
- Status status = _checkDbDoesNotExist(txn, dbName, nullptr);
+ Status status = _checkDbDoesNotExist(opCtx, dbName, nullptr);
if (!status.isOK()) {
return status;
}
// Database does not exist, pick a shard and create a new entry
- auto newShardIdStatus = _selectShardForNewDatabase(txn, grid.shardRegistry());
+ auto newShardIdStatus = _selectShardForNewDatabase(opCtx, grid.shardRegistry());
if (!newShardIdStatus.isOK()) {
return newShardIdStatus.getStatus();
}
@@ -328,7 +329,7 @@ Status ShardingCatalogClientImpl::createDatabase(OperationContext* txn, const st
db.setSharded(false);
status = insertConfigDocument(
- txn, DatabaseType::ConfigNS, db.toBSON(), ShardingCatalogClient::kMajorityWriteConcern);
+ opCtx, DatabaseType::ConfigNS, db.toBSON(), ShardingCatalogClient::kMajorityWriteConcern);
if (status.code() == ErrorCodes::DuplicateKey) {
return Status(ErrorCodes::NamespaceExists, "database " + dbName + " already exists");
}
@@ -336,12 +337,12 @@ Status ShardingCatalogClientImpl::createDatabase(OperationContext* txn, const st
return status;
}
-Status ShardingCatalogClientImpl::logAction(OperationContext* txn,
+Status ShardingCatalogClientImpl::logAction(OperationContext* opCtx,
const std::string& what,
const std::string& ns,
const BSONObj& detail) {
if (_actionLogCollectionCreated.load() == 0) {
- Status result = _createCappedConfigCollection(txn,
+ Status result = _createCappedConfigCollection(opCtx,
kActionLogCollectionName,
kActionLogCollectionSizeMB,
ShardingCatalogClient::kMajorityWriteConcern);
@@ -353,7 +354,7 @@ Status ShardingCatalogClientImpl::logAction(OperationContext* txn,
}
}
- return _log(txn,
+ return _log(opCtx,
kActionLogCollectionName,
what,
ns,
@@ -361,7 +362,7 @@ Status ShardingCatalogClientImpl::logAction(OperationContext* txn,
ShardingCatalogClient::kMajorityWriteConcern);
}
-Status ShardingCatalogClientImpl::logChange(OperationContext* txn,
+Status ShardingCatalogClientImpl::logChange(OperationContext* opCtx,
const std::string& what,
const std::string& ns,
const BSONObj& detail,
@@ -370,7 +371,7 @@ Status ShardingCatalogClientImpl::logChange(OperationContext* txn,
writeConcern.wMode == WriteConcernOptions::kMajority);
if (_changeLogCollectionCreated.load() == 0) {
Status result = _createCappedConfigCollection(
- txn, kChangeLogCollectionName, kChangeLogCollectionSizeMB, writeConcern);
+ opCtx, kChangeLogCollectionName, kChangeLogCollectionSizeMB, writeConcern);
if (result.isOK()) {
_changeLogCollectionCreated.store(1);
} else {
@@ -379,17 +380,17 @@ Status ShardingCatalogClientImpl::logChange(OperationContext* txn,
}
}
- return _log(txn, kChangeLogCollectionName, what, ns, detail, writeConcern);
+ return _log(opCtx, kChangeLogCollectionName, what, ns, detail, writeConcern);
}
// static
StatusWith<ShardId> ShardingCatalogClientImpl::_selectShardForNewDatabase(
- OperationContext* txn, ShardRegistry* shardRegistry) {
+ OperationContext* opCtx, ShardRegistry* shardRegistry) {
vector<ShardId> allShardIds;
shardRegistry->getAllShardIds(&allShardIds);
if (allShardIds.empty()) {
- shardRegistry->reload(txn);
+ shardRegistry->reload(opCtx);
shardRegistry->getAllShardIds(&allShardIds);
if (allShardIds.empty()) {
@@ -399,7 +400,7 @@ StatusWith<ShardId> ShardingCatalogClientImpl::_selectShardForNewDatabase(
ShardId candidateShardId = allShardIds[0];
- auto candidateSizeStatus = shardutil::retrieveTotalShardSize(txn, candidateShardId);
+ auto candidateSizeStatus = shardutil::retrieveTotalShardSize(opCtx, candidateShardId);
if (!candidateSizeStatus.isOK()) {
return candidateSizeStatus.getStatus();
}
@@ -407,7 +408,7 @@ StatusWith<ShardId> ShardingCatalogClientImpl::_selectShardForNewDatabase(
for (size_t i = 1; i < allShardIds.size(); i++) {
const ShardId shardId = allShardIds[i];
- const auto sizeStatus = shardutil::retrieveTotalShardSize(txn, shardId);
+ const auto sizeStatus = shardutil::retrieveTotalShardSize(opCtx, shardId);
if (!sizeStatus.isOK()) {
return sizeStatus.getStatus();
}
@@ -421,7 +422,8 @@ StatusWith<ShardId> ShardingCatalogClientImpl::_selectShardForNewDatabase(
return candidateShardId;
}
-Status ShardingCatalogClientImpl::enableSharding(OperationContext* txn, const std::string& dbName) {
+Status ShardingCatalogClientImpl::enableSharding(OperationContext* opCtx,
+ const std::string& dbName) {
invariant(nsIsDbOnly(dbName));
if (dbName == NamespaceString::kConfigDb || dbName == NamespaceString::kAdminDb) {
@@ -433,7 +435,7 @@ Status ShardingCatalogClientImpl::enableSharding(OperationContext* txn, const st
// Lock the database globally to prevent conflicts with simultaneous database
// creation/modification.
auto scopedDistLock = getDistLockManager()->lock(
- txn, dbName, "enableSharding", DistLockManager::kDefaultLockTimeout);
+ opCtx, dbName, "enableSharding", DistLockManager::kDefaultLockTimeout);
if (!scopedDistLock.isOK()) {
return scopedDistLock.getStatus();
}
@@ -441,10 +443,10 @@ Status ShardingCatalogClientImpl::enableSharding(OperationContext* txn, const st
// Check for case sensitivity violations
DatabaseType db;
- Status status = _checkDbDoesNotExist(txn, dbName, &db);
+ Status status = _checkDbDoesNotExist(opCtx, dbName, &db);
if (status.isOK()) {
// Database does not exist, create a new entry
- auto newShardIdStatus = _selectShardForNewDatabase(txn, grid.shardRegistry());
+ auto newShardIdStatus = _selectShardForNewDatabase(opCtx, grid.shardRegistry());
if (!newShardIdStatus.isOK()) {
return newShardIdStatus.getStatus();
}
@@ -470,23 +472,23 @@ Status ShardingCatalogClientImpl::enableSharding(OperationContext* txn, const st
log() << "Enabling sharding for database [" << dbName << "] in config db";
- return updateDatabase(txn, dbName, db);
+ return updateDatabase(opCtx, dbName, db);
}
-Status ShardingCatalogClientImpl::_log(OperationContext* txn,
+Status ShardingCatalogClientImpl::_log(OperationContext* opCtx,
const StringData& logCollName,
const std::string& what,
const std::string& operationNS,
const BSONObj& detail,
const WriteConcernOptions& writeConcern) {
- Date_t now = Grid::get(txn)->getNetwork()->now();
- const std::string hostName = Grid::get(txn)->getNetwork()->getHostName();
+ Date_t now = Grid::get(opCtx)->getNetwork()->now();
+ const std::string hostName = Grid::get(opCtx)->getNetwork()->getHostName();
const string changeId = str::stream() << hostName << "-" << now.toString() << "-" << OID::gen();
ChangeLogType changeLog;
changeLog.setChangeId(changeId);
changeLog.setServer(hostName);
- changeLog.setClientAddr(txn->getClient()->clientAddress(true));
+ changeLog.setClientAddr(opCtx->getClient()->clientAddress(true));
changeLog.setTime(now);
changeLog.setNS(operationNS);
changeLog.setWhat(what);
@@ -496,7 +498,7 @@ Status ShardingCatalogClientImpl::_log(OperationContext* txn,
log() << "about to log metadata event into " << logCollName << ": " << redact(changeLogBSON);
const NamespaceString nss("config", logCollName);
- Status result = insertConfigDocument(txn, nss.ns(), changeLogBSON, writeConcern);
+ Status result = insertConfigDocument(opCtx, nss.ns(), changeLogBSON, writeConcern);
if (!result.isOK()) {
warning() << "Error encountered while logging config change with ID [" << changeId
@@ -506,7 +508,7 @@ Status ShardingCatalogClientImpl::_log(OperationContext* txn,
return result;
}
-Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
+Status ShardingCatalogClientImpl::shardCollection(OperationContext* opCtx,
const string& ns,
const ShardKeyPattern& fieldsAndOrder,
const BSONObj& defaultCollation,
@@ -516,20 +518,20 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
// Lock the collection globally so that no other mongos can try to shard or drop the collection
// at the same time.
auto scopedDistLock = getDistLockManager()->lock(
- txn, ns, "shardCollection", DistLockManager::kDefaultLockTimeout);
+ opCtx, ns, "shardCollection", DistLockManager::kDefaultLockTimeout);
if (!scopedDistLock.isOK()) {
return scopedDistLock.getStatus();
}
- auto getDBStatus = getDatabase(txn, nsToDatabase(ns));
+ auto getDBStatus = getDatabase(opCtx, nsToDatabase(ns));
if (!getDBStatus.isOK()) {
return getDBStatus.getStatus();
}
- auto const shardRegistry = Grid::get(txn)->shardRegistry();
+ auto const shardRegistry = Grid::get(opCtx)->shardRegistry();
ShardId dbPrimaryShardId = getDBStatus.getValue().value.getPrimary();
- const auto primaryShardStatus = shardRegistry->getShard(txn, dbPrimaryShardId);
+ const auto primaryShardStatus = shardRegistry->getShard(opCtx, dbPrimaryShardId);
if (!primaryShardStatus.isOK()) {
return primaryShardStatus.getStatus();
}
@@ -538,7 +540,7 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
// This is an extra safety check that there aren't any partially written chunks from a
// previous failed invocation of 'shardCollection'
auto countStatus = _runCountCommandOnConfig(
- txn, NamespaceString(ChunkType::ConfigNS), BSON(ChunkType::ns(ns)));
+ opCtx, NamespaceString(ChunkType::ConfigNS), BSON(ChunkType::ns(ns)));
if (!countStatus.isOK()) {
return countStatus.getStatus();
}
@@ -567,7 +569,7 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
collectionDetail.append("numChunks", static_cast<int>(initPoints.size() + 1));
- logChange(txn,
+ logChange(opCtx,
"shardCollection.start",
ns,
collectionDetail.obj(),
@@ -579,8 +581,8 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
// Construct the collection default collator.
std::unique_ptr<CollatorInterface> defaultCollator;
if (!defaultCollation.isEmpty()) {
- auto statusWithCollator =
- CollatorFactoryInterface::get(txn->getServiceContext())->makeFromBSON(defaultCollation);
+ auto statusWithCollator = CollatorFactoryInterface::get(opCtx->getServiceContext())
+ ->makeFromBSON(defaultCollation);
if (!statusWithCollator.isOK()) {
return statusWithCollator.getStatus();
}
@@ -588,7 +590,7 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
}
auto createFirstChunksStatus =
- createFirstChunks(txn, nss, fieldsAndOrder, dbPrimaryShardId, initPoints, initShardIds);
+ createFirstChunks(opCtx, nss, fieldsAndOrder, dbPrimaryShardId, initPoints, initShardIds);
if (!createFirstChunksStatus.isOK()) {
return createFirstChunksStatus.getStatus();
}
@@ -607,7 +609,7 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
coll.setDefaultCollation(defaultCollator ? defaultCollator->getSpec().toBSON() : BSONObj());
coll.setUnique(unique);
- Status updateCollStatus = updateCollection(txn, ns, coll);
+ Status updateCollStatus = updateCollection(opCtx, ns, coll);
if (!updateCollStatus.isOK()) {
return updateCollStatus;
}
@@ -624,14 +626,14 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
collVersion,
true);
- auto shardStatus = shardRegistry->getShard(txn, dbPrimaryShardId);
+ auto shardStatus = shardRegistry->getShard(opCtx, dbPrimaryShardId);
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
auto shard = shardStatus.getValue();
auto ssvResponse =
- shard->runCommandWithFixedRetryAttempts(txn,
+ shard->runCommandWithFixedRetryAttempts(opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"admin",
ssv.toBSON(),
@@ -643,7 +645,7 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
<< dbPrimaryShardId << causedBy(redact(status));
}
- logChange(txn,
+ logChange(opCtx,
"shardCollection.end",
ns,
BSON("version" << collVersion.toString()),
@@ -652,12 +654,12 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
return Status::OK();
}
-StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(OperationContext* txn,
+StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(OperationContext* opCtx,
const ShardId& shardId) {
// Check preconditions for removing the shard
string name = shardId.toString();
auto countStatus = _runCountCommandOnConfig(
- txn,
+ opCtx,
NamespaceString(ShardType::ConfigNS),
BSON(ShardType::name() << NE << name << ShardType::draining(true)));
if (!countStatus.isOK()) {
@@ -669,7 +671,7 @@ StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(Operation
}
countStatus = _runCountCommandOnConfig(
- txn, NamespaceString(ShardType::ConfigNS), BSON(ShardType::name() << NE << name));
+ opCtx, NamespaceString(ShardType::ConfigNS), BSON(ShardType::name() << NE << name));
if (!countStatus.isOK()) {
return countStatus.getStatus();
}
@@ -679,7 +681,7 @@ StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(Operation
// Figure out if shard is already draining
countStatus =
- _runCountCommandOnConfig(txn,
+ _runCountCommandOnConfig(opCtx,
NamespaceString(ShardType::ConfigNS),
BSON(ShardType::name() << name << ShardType::draining(true)));
if (!countStatus.isOK()) {
@@ -689,7 +691,7 @@ StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(Operation
if (countStatus.getValue() == 0) {
log() << "going to start draining shard: " << name;
- auto updateStatus = updateConfigDocument(txn,
+ auto updateStatus = updateConfigDocument(opCtx,
ShardType::ConfigNS,
BSON(ShardType::name() << name),
BSON("$set" << BSON(ShardType::draining(true))),
@@ -701,10 +703,10 @@ StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(Operation
return updateStatus.getStatus();
}
- grid.shardRegistry()->reload(txn);
+ grid.shardRegistry()->reload(opCtx);
// Record start in changelog
- logChange(txn,
+ logChange(opCtx,
"removeShard.start",
"",
BSON("shard" << name),
@@ -715,14 +717,14 @@ StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(Operation
// Draining has already started, now figure out how many chunks and databases are still on the
// shard.
countStatus = _runCountCommandOnConfig(
- txn, NamespaceString(ChunkType::ConfigNS), BSON(ChunkType::shard(name)));
+ opCtx, NamespaceString(ChunkType::ConfigNS), BSON(ChunkType::shard(name)));
if (!countStatus.isOK()) {
return countStatus.getStatus();
}
const long long chunkCount = countStatus.getValue();
countStatus = _runCountCommandOnConfig(
- txn, NamespaceString(DatabaseType::ConfigNS), BSON(DatabaseType::primary(name)));
+ opCtx, NamespaceString(DatabaseType::ConfigNS), BSON(DatabaseType::primary(name)));
if (!countStatus.isOK()) {
return countStatus.getStatus();
}
@@ -735,9 +737,9 @@ StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(Operation
// Draining is done, now finish removing the shard.
log() << "going to remove shard: " << name;
- audit::logRemoveShard(txn->getClient(), name);
+ audit::logRemoveShard(opCtx->getClient(), name);
- Status status = removeConfigDocuments(txn,
+ Status status = removeConfigDocuments(opCtx,
ShardType::ConfigNS,
BSON(ShardType::name() << name),
ShardingCatalogClient::kMajorityWriteConcern);
@@ -750,10 +752,10 @@ StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(Operation
shardConnectionPool.removeHost(name);
ReplicaSetMonitor::remove(name);
- grid.shardRegistry()->reload(txn);
+ grid.shardRegistry()->reload(opCtx);
// Record finish in changelog
- logChange(txn,
+ logChange(opCtx,
"removeShard",
"",
BSON("shard" << name),
@@ -763,7 +765,7 @@ StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(Operation
}
StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::getDatabase(
- OperationContext* txn, const std::string& dbName) {
+ OperationContext* opCtx, const std::string& dbName) {
if (!NamespaceString::validDBName(dbName, NamespaceString::DollarInDbNameBehavior::Allow)) {
return {ErrorCodes::InvalidNamespace, stream() << dbName << " is not a valid db name"};
}
@@ -778,12 +780,12 @@ StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::getDatabas
return repl::OpTimeWith<DatabaseType>(dbt);
}
- auto result = _fetchDatabaseMetadata(txn, dbName, kConfigReadSelector);
+ auto result = _fetchDatabaseMetadata(opCtx, dbName, kConfigReadSelector);
if (result == ErrorCodes::NamespaceNotFound) {
// If we failed to find the database metadata on the 'nearest' config server, try again
// against the primary, in case the database was recently created.
- result =
- _fetchDatabaseMetadata(txn, dbName, ReadPreferenceSetting{ReadPreference::PrimaryOnly});
+ result = _fetchDatabaseMetadata(
+ opCtx, dbName, ReadPreferenceSetting{ReadPreference::PrimaryOnly});
if (!result.isOK() && (result != ErrorCodes::NamespaceNotFound)) {
return {result.getStatus().code(),
str::stream() << "Could not confirm non-existence of database " << dbName
@@ -796,10 +798,10 @@ StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::getDatabas
}
StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::_fetchDatabaseMetadata(
- OperationContext* txn, const std::string& dbName, const ReadPreferenceSetting& readPref) {
+ OperationContext* opCtx, const std::string& dbName, const ReadPreferenceSetting& readPref) {
dassert(dbName != "admin" && dbName != "config");
- auto findStatus = _exhaustiveFindOnConfig(txn,
+ auto findStatus = _exhaustiveFindOnConfig(opCtx,
readPref,
repl::ReadConcernLevel::kMajorityReadConcern,
NamespaceString(DatabaseType::ConfigNS),
@@ -826,8 +828,8 @@ StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::_fetchData
}
StatusWith<repl::OpTimeWith<CollectionType>> ShardingCatalogClientImpl::getCollection(
- OperationContext* txn, const std::string& collNs) {
- auto statusFind = _exhaustiveFindOnConfig(txn,
+ OperationContext* opCtx, const std::string& collNs) {
+ auto statusFind = _exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
repl::ReadConcernLevel::kMajorityReadConcern,
NamespaceString(CollectionType::ConfigNS),
@@ -855,7 +857,7 @@ StatusWith<repl::OpTimeWith<CollectionType>> ShardingCatalogClientImpl::getColle
return repl::OpTimeWith<CollectionType>(parseStatus.getValue(), retOpTimePair.opTime);
}
-Status ShardingCatalogClientImpl::getCollections(OperationContext* txn,
+Status ShardingCatalogClientImpl::getCollections(OperationContext* opCtx,
const std::string* dbName,
std::vector<CollectionType>* collections,
OpTime* opTime) {
@@ -866,7 +868,7 @@ Status ShardingCatalogClientImpl::getCollections(OperationContext* txn,
string(str::stream() << "^" << pcrecpp::RE::QuoteMeta(*dbName) << "\\."));
}
- auto findStatus = _exhaustiveFindOnConfig(txn,
+ auto findStatus = _exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
repl::ReadConcernLevel::kMajorityReadConcern,
NamespaceString(CollectionType::ConfigNS),
@@ -901,14 +903,15 @@ Status ShardingCatalogClientImpl::getCollections(OperationContext* txn,
return Status::OK();
}
-Status ShardingCatalogClientImpl::dropCollection(OperationContext* txn, const NamespaceString& ns) {
- logChange(txn,
+Status ShardingCatalogClientImpl::dropCollection(OperationContext* opCtx,
+ const NamespaceString& ns) {
+ logChange(opCtx,
"dropCollection.start",
ns.ns(),
BSONObj(),
ShardingCatalogClientImpl::kMajorityWriteConcern);
- auto shardsStatus = getAllShards(txn, repl::ReadConcernLevel::kMajorityReadConcern);
+ auto shardsStatus = getAllShards(opCtx, repl::ReadConcernLevel::kMajorityReadConcern);
if (!shardsStatus.isOK()) {
return shardsStatus.getStatus();
}
@@ -923,7 +926,7 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* txn, const Na
waitFor = Seconds(data["waitForSecs"].numberInt());
}
- auto scopedDistLock = getDistLockManager()->lock(txn, ns.ns(), "drop", waitFor);
+ auto scopedDistLock = getDistLockManager()->lock(opCtx, ns.ns(), "drop", waitFor);
if (!scopedDistLock.isOK()) {
return scopedDistLock.getStatus();
}
@@ -934,16 +937,16 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* txn, const Na
auto* shardRegistry = grid.shardRegistry();
for (const auto& shardEntry : allShards) {
- auto shardStatus = shardRegistry->getShard(txn, shardEntry.getName());
+ auto shardStatus = shardRegistry->getShard(opCtx, shardEntry.getName());
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
auto dropResult = shardStatus.getValue()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
ns.db().toString(),
BSON("drop" << ns.coll() << WriteConcernOptions::kWriteConcernField
- << txn->getWriteConcern().toBSON()),
+ << opCtx->getWriteConcern().toBSON()),
Shard::RetryPolicy::kIdempotent);
if (!dropResult.isOK()) {
@@ -987,7 +990,7 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* txn, const Na
LOG(1) << "dropCollection " << ns << " shard data deleted";
// Remove chunk data
- Status result = removeConfigDocuments(txn,
+ Status result = removeConfigDocuments(opCtx,
ChunkType::ConfigNS,
BSON(ChunkType::ns(ns.ns())),
ShardingCatalogClient::kMajorityWriteConcern);
@@ -1002,9 +1005,9 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* txn, const Na
coll.setNs(ns);
coll.setDropped(true);
coll.setEpoch(ChunkVersion::DROPPED().epoch());
- coll.setUpdatedAt(Grid::get(txn)->getNetwork()->now());
+ coll.setUpdatedAt(Grid::get(opCtx)->getNetwork()->now());
- result = updateCollection(txn, ns.ns(), coll);
+ result = updateCollection(opCtx, ns.ns(), coll);
if (!result.isOK()) {
return result;
}
@@ -1020,14 +1023,14 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* txn, const Na
ChunkVersion::DROPPED(),
true);
- auto shardStatus = shardRegistry->getShard(txn, shardEntry.getName());
+ auto shardStatus = shardRegistry->getShard(opCtx, shardEntry.getName());
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
auto shard = shardStatus.getValue();
auto ssvResult = shard->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"admin",
ssv.toBSON(),
@@ -1043,7 +1046,7 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* txn, const Na
}
auto unsetShardingStatus = shard->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"admin",
BSON("unsetSharding" << 1),
@@ -1061,7 +1064,7 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* txn, const Na
LOG(1) << "dropCollection " << ns << " completed";
- logChange(txn,
+ logChange(opCtx,
"dropCollection",
ns.ns(),
BSONObj(),
@@ -1070,9 +1073,9 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* txn, const Na
return Status::OK();
}
-StatusWith<BSONObj> ShardingCatalogClientImpl::getGlobalSettings(OperationContext* txn,
+StatusWith<BSONObj> ShardingCatalogClientImpl::getGlobalSettings(OperationContext* opCtx,
StringData key) {
- auto findStatus = _exhaustiveFindOnConfig(txn,
+ auto findStatus = _exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
repl::ReadConcernLevel::kMajorityReadConcern,
kSettingsNamespace,
@@ -1094,9 +1097,9 @@ StatusWith<BSONObj> ShardingCatalogClientImpl::getGlobalSettings(OperationContex
}
StatusWith<VersionType> ShardingCatalogClientImpl::getConfigVersion(
- OperationContext* txn, repl::ReadConcernLevel readConcern) {
- auto findStatus = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
- txn,
+ OperationContext* opCtx, repl::ReadConcernLevel readConcern) {
+ auto findStatus = Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ opCtx,
kConfigReadSelector,
readConcern,
NamespaceString(VersionType::ConfigNS),
@@ -1142,10 +1145,10 @@ StatusWith<VersionType> ShardingCatalogClientImpl::getConfigVersion(
return versionTypeResult.getValue();
}
-Status ShardingCatalogClientImpl::getDatabasesForShard(OperationContext* txn,
+Status ShardingCatalogClientImpl::getDatabasesForShard(OperationContext* opCtx,
const ShardId& shardId,
vector<string>* dbs) {
- auto findStatus = _exhaustiveFindOnConfig(txn,
+ auto findStatus = _exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
repl::ReadConcernLevel::kMajorityReadConcern,
NamespaceString(DatabaseType::ConfigNS),
@@ -1170,7 +1173,7 @@ Status ShardingCatalogClientImpl::getDatabasesForShard(OperationContext* txn,
return Status::OK();
}
-Status ShardingCatalogClientImpl::getChunks(OperationContext* txn,
+Status ShardingCatalogClientImpl::getChunks(OperationContext* opCtx,
const BSONObj& query,
const BSONObj& sort,
boost::optional<int> limit,
@@ -1183,7 +1186,7 @@ Status ShardingCatalogClientImpl::getChunks(OperationContext* txn,
// Convert boost::optional<int> to boost::optional<long long>.
auto longLimit = limit ? boost::optional<long long>(*limit) : boost::none;
- auto findStatus = _exhaustiveFindOnConfig(txn,
+ auto findStatus = _exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
readConcern,
NamespaceString(ChunkType::ConfigNS),
@@ -1217,12 +1220,12 @@ Status ShardingCatalogClientImpl::getChunks(OperationContext* txn,
return Status::OK();
}
-Status ShardingCatalogClientImpl::getTagsForCollection(OperationContext* txn,
+Status ShardingCatalogClientImpl::getTagsForCollection(OperationContext* opCtx,
const std::string& collectionNs,
std::vector<TagsType>* tags) {
tags->clear();
- auto findStatus = _exhaustiveFindOnConfig(txn,
+ auto findStatus = _exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
repl::ReadConcernLevel::kMajorityReadConcern,
NamespaceString(TagsType::ConfigNS),
@@ -1252,9 +1255,9 @@ Status ShardingCatalogClientImpl::getTagsForCollection(OperationContext* txn,
}
StatusWith<repl::OpTimeWith<std::vector<ShardType>>> ShardingCatalogClientImpl::getAllShards(
- OperationContext* txn, repl::ReadConcernLevel readConcern) {
+ OperationContext* opCtx, repl::ReadConcernLevel readConcern) {
std::vector<ShardType> shards;
- auto findStatus = _exhaustiveFindOnConfig(txn,
+ auto findStatus = _exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
readConcern,
NamespaceString(ShardType::ConfigNS),
@@ -1287,7 +1290,7 @@ StatusWith<repl::OpTimeWith<std::vector<ShardType>>> ShardingCatalogClientImpl::
findStatus.getValue().opTime};
}
-bool ShardingCatalogClientImpl::runUserManagementWriteCommand(OperationContext* txn,
+bool ShardingCatalogClientImpl::runUserManagementWriteCommand(OperationContext* opCtx,
const std::string& commandName,
const std::string& dbname,
const BSONObj& cmdObj,
@@ -1339,8 +1342,8 @@ bool ShardingCatalogClientImpl::runUserManagementWriteCommand(OperationContext*
}
auto response =
- Grid::get(txn)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
dbname,
cmdToRun,
@@ -1361,7 +1364,7 @@ bool ShardingCatalogClientImpl::runUserManagementWriteCommand(OperationContext*
return true;
}
-bool ShardingCatalogClientImpl::runReadCommandForTest(OperationContext* txn,
+bool ShardingCatalogClientImpl::runReadCommandForTest(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder* result) {
@@ -1370,8 +1373,8 @@ bool ShardingCatalogClientImpl::runReadCommandForTest(OperationContext* txn,
_appendReadConcern(&cmdBuilder);
auto resultStatus =
- Grid::get(txn)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn, kConfigReadSelector, dbname, cmdBuilder.done(), Shard::RetryPolicy::kIdempotent);
+ Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
+ opCtx, kConfigReadSelector, dbname, cmdBuilder.done(), Shard::RetryPolicy::kIdempotent);
if (resultStatus.isOK()) {
result->appendElements(resultStatus.getValue().response);
return resultStatus.getValue().commandStatus.isOK();
@@ -1380,13 +1383,13 @@ bool ShardingCatalogClientImpl::runReadCommandForTest(OperationContext* txn,
return Command::appendCommandStatus(*result, resultStatus.getStatus());
}
-bool ShardingCatalogClientImpl::runUserManagementReadCommand(OperationContext* txn,
+bool ShardingCatalogClientImpl::runUserManagementReadCommand(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder* result) {
auto resultStatus =
- Grid::get(txn)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
+ opCtx,
kConfigPrimaryPreferredSelector,
dbname,
cmdObj,
@@ -1400,7 +1403,7 @@ bool ShardingCatalogClientImpl::runUserManagementReadCommand(OperationContext* t
return Command::appendCommandStatus(*result, resultStatus.getStatus());
}
-Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* txn,
+Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* opCtx,
const BSONArray& updateOps,
const BSONArray& preCondition,
const std::string& nss,
@@ -1415,8 +1418,8 @@ Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* txn,
<< writeConcern.toBSON());
auto response =
- Grid::get(txn)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"config",
cmd,
@@ -1456,7 +1459,7 @@ Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* txn,
lastChunkVersion.addToBSON(query, ChunkType::DEPRECATED_lastmod());
query.append(ChunkType::ns(), nss);
Status chunkStatus =
- getChunks(txn, query.obj(), BSONObj(), 1, &newestChunk, nullptr, readConcern);
+ getChunks(opCtx, query.obj(), BSONObj(), 1, &newestChunk, nullptr, readConcern);
if (!chunkStatus.isOK()) {
errMsg = str::stream() << "getChunks function failed, unable to validate chunk "
@@ -1486,7 +1489,7 @@ DistLockManager* ShardingCatalogClientImpl::getDistLockManager() {
return _distLockManager.get();
}
-void ShardingCatalogClientImpl::writeConfigServerDirect(OperationContext* txn,
+void ShardingCatalogClientImpl::writeConfigServerDirect(OperationContext* opCtx,
const BatchedCommandRequest& batchRequest,
BatchedCommandResponse* batchResponse) {
// We only support batch sizes of one for config writes
@@ -1499,12 +1502,12 @@ void ShardingCatalogClientImpl::writeConfigServerDirect(OperationContext* txn,
return;
}
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
*batchResponse = configShard->runBatchWriteCommandOnConfig(
- txn, batchRequest, Shard::RetryPolicy::kNotIdempotent);
+ opCtx, batchRequest, Shard::RetryPolicy::kNotIdempotent);
}
-Status ShardingCatalogClientImpl::insertConfigDocument(OperationContext* txn,
+Status ShardingCatalogClientImpl::insertConfigDocument(OperationContext* opCtx,
const std::string& ns,
const BSONObj& doc,
const WriteConcernOptions& writeConcern) {
@@ -1521,10 +1524,10 @@ Status ShardingCatalogClientImpl::insertConfigDocument(OperationContext* txn,
request.setNS(nss);
request.setWriteConcern(writeConcern.toBSON());
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
for (int retry = 1; retry <= kMaxWriteRetry; retry++) {
auto response =
- configShard->runBatchWriteCommandOnConfig(txn, request, Shard::RetryPolicy::kNoRetry);
+ configShard->runBatchWriteCommandOnConfig(opCtx, request, Shard::RetryPolicy::kNoRetry);
Status status = response.toStatus();
@@ -1544,7 +1547,7 @@ Status ShardingCatalogClientImpl::insertConfigDocument(OperationContext* txn,
LOG(1) << "Insert retry failed because of duplicate key error, rechecking.";
auto fetchDuplicate =
- _exhaustiveFindOnConfig(txn,
+ _exhaustiveFindOnConfig(opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kMajorityReadConcern,
nss,
@@ -1580,7 +1583,7 @@ Status ShardingCatalogClientImpl::insertConfigDocument(OperationContext* txn,
}
StatusWith<bool> ShardingCatalogClientImpl::updateConfigDocument(
- OperationContext* txn,
+ OperationContext* opCtx,
const string& ns,
const BSONObj& query,
const BSONObj& update,
@@ -1605,9 +1608,9 @@ StatusWith<bool> ShardingCatalogClientImpl::updateConfigDocument(
request.setNS(nss);
request.setWriteConcern(writeConcern.toBSON());
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto response =
- configShard->runBatchWriteCommandOnConfig(txn, request, Shard::RetryPolicy::kIdempotent);
+ configShard->runBatchWriteCommandOnConfig(opCtx, request, Shard::RetryPolicy::kIdempotent);
Status status = response.toStatus();
if (!status.isOK()) {
@@ -1619,7 +1622,7 @@ StatusWith<bool> ShardingCatalogClientImpl::updateConfigDocument(
return (nSelected == 1);
}
-Status ShardingCatalogClientImpl::removeConfigDocuments(OperationContext* txn,
+Status ShardingCatalogClientImpl::removeConfigDocuments(OperationContext* opCtx,
const string& ns,
const BSONObj& query,
const WriteConcernOptions& writeConcern) {
@@ -1637,21 +1640,21 @@ Status ShardingCatalogClientImpl::removeConfigDocuments(OperationContext* txn,
request.setNS(nss);
request.setWriteConcern(writeConcern.toBSON());
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto response =
- configShard->runBatchWriteCommandOnConfig(txn, request, Shard::RetryPolicy::kIdempotent);
+ configShard->runBatchWriteCommandOnConfig(opCtx, request, Shard::RetryPolicy::kIdempotent);
return response.toStatus();
}
-Status ShardingCatalogClientImpl::_checkDbDoesNotExist(OperationContext* txn,
+Status ShardingCatalogClientImpl::_checkDbDoesNotExist(OperationContext* opCtx,
const string& dbName,
DatabaseType* db) {
BSONObjBuilder queryBuilder;
queryBuilder.appendRegex(
DatabaseType::name(), (string) "^" + pcrecpp::RE::QuoteMeta(dbName) + "$", "i");
- auto findStatus = _exhaustiveFindOnConfig(txn,
+ auto findStatus = _exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
repl::ReadConcernLevel::kMajorityReadConcern,
NamespaceString(DatabaseType::ConfigNS),
@@ -1692,7 +1695,7 @@ Status ShardingCatalogClientImpl::_checkDbDoesNotExist(OperationContext* txn,
}
Status ShardingCatalogClientImpl::_createCappedConfigCollection(
- OperationContext* txn,
+ OperationContext* opCtx,
StringData collName,
int cappedSize,
const WriteConcernOptions& writeConcern) {
@@ -1701,8 +1704,8 @@ Status ShardingCatalogClientImpl::_createCappedConfigCollection(
<< writeConcern.toBSON());
auto result =
- Grid::get(txn)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"config",
createCmd,
@@ -1728,7 +1731,7 @@ Status ShardingCatalogClientImpl::_createCappedConfigCollection(
return result.getValue().writeConcernStatus;
}
-StatusWith<long long> ShardingCatalogClientImpl::_runCountCommandOnConfig(OperationContext* txn,
+StatusWith<long long> ShardingCatalogClientImpl::_runCountCommandOnConfig(OperationContext* opCtx,
const NamespaceString& ns,
BSONObj query) {
BSONObjBuilder countBuilder;
@@ -1736,9 +1739,9 @@ StatusWith<long long> ShardingCatalogClientImpl::_runCountCommandOnConfig(Operat
countBuilder.append("query", query);
_appendReadConcern(&countBuilder);
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto resultStatus =
- configShard->runCommandWithFixedRetryAttempts(txn,
+ configShard->runCommandWithFixedRetryAttempts(opCtx,
kConfigReadSelector,
ns.db().toString(),
countBuilder.done(),
@@ -1763,15 +1766,15 @@ StatusWith<long long> ShardingCatalogClientImpl::_runCountCommandOnConfig(Operat
}
StatusWith<repl::OpTimeWith<vector<BSONObj>>> ShardingCatalogClientImpl::_exhaustiveFindOnConfig(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
repl::ReadConcernLevel readConcern,
const NamespaceString& nss,
const BSONObj& query,
const BSONObj& sort,
boost::optional<long long> limit) {
- auto response = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
- txn, readPref, readConcern, nss, query, sort, limit);
+ auto response = Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ opCtx, readPref, readConcern, nss, query, sort, limit);
if (!response.isOK()) {
return response.getStatus();
}
@@ -1787,10 +1790,10 @@ void ShardingCatalogClientImpl::_appendReadConcern(BSONObjBuilder* builder) {
}
Status ShardingCatalogClientImpl::appendInfoForConfigServerDatabases(
- OperationContext* txn, const BSONObj& listDatabasesCmd, BSONArrayBuilder* builder) {
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ OperationContext* opCtx, const BSONObj& listDatabasesCmd, BSONArrayBuilder* builder) {
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto resultStatus =
- configShard->runCommandWithFixedRetryAttempts(txn,
+ configShard->runCommandWithFixedRetryAttempts(opCtx,
kConfigPrimaryPreferredSelector,
"admin",
listDatabasesCmd,
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.h b/src/mongo/s/catalog/sharding_catalog_client_impl.h
index 898b3774456..0a94a3a18eb 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.h
@@ -58,32 +58,32 @@ public:
*/
Status startup() override;
- void shutDown(OperationContext* txn) override;
+ void shutDown(OperationContext* opCtx) override;
- Status enableSharding(OperationContext* txn, const std::string& dbName) override;
+ Status enableSharding(OperationContext* opCtx, const std::string& dbName) override;
- Status updateDatabase(OperationContext* txn,
+ Status updateDatabase(OperationContext* opCtx,
const std::string& dbName,
const DatabaseType& db) override;
- Status updateCollection(OperationContext* txn,
+ Status updateCollection(OperationContext* opCtx,
const std::string& collNs,
const CollectionType& coll) override;
- Status createDatabase(OperationContext* txn, const std::string& dbName) override;
+ Status createDatabase(OperationContext* opCtx, const std::string& dbName) override;
- Status logAction(OperationContext* txn,
+ Status logAction(OperationContext* opCtx,
const std::string& what,
const std::string& ns,
const BSONObj& detail) override;
- Status logChange(OperationContext* txn,
+ Status logChange(OperationContext* opCtx,
const std::string& what,
const std::string& ns,
const BSONObj& detail,
const WriteConcernOptions& writeConcern) override;
- Status shardCollection(OperationContext* txn,
+ Status shardCollection(OperationContext* opCtx,
const std::string& ns,
const ShardKeyPattern& fieldsAndOrder,
const BSONObj& defaultCollation,
@@ -91,27 +91,27 @@ public:
const std::vector<BSONObj>& initPoints,
const std::set<ShardId>& initShardsIds) override;
- StatusWith<ShardDrainingStatus> removeShard(OperationContext* txn,
+ StatusWith<ShardDrainingStatus> removeShard(OperationContext* opCtx,
const ShardId& name) override;
- StatusWith<repl::OpTimeWith<DatabaseType>> getDatabase(OperationContext* txn,
+ StatusWith<repl::OpTimeWith<DatabaseType>> getDatabase(OperationContext* opCtx,
const std::string& dbName) override;
- StatusWith<repl::OpTimeWith<CollectionType>> getCollection(OperationContext* txn,
+ StatusWith<repl::OpTimeWith<CollectionType>> getCollection(OperationContext* opCtx,
const std::string& collNs) override;
- Status getCollections(OperationContext* txn,
+ Status getCollections(OperationContext* opCtx,
const std::string* dbName,
std::vector<CollectionType>* collections,
repl::OpTime* optime) override;
- Status dropCollection(OperationContext* txn, const NamespaceString& ns) override;
+ Status dropCollection(OperationContext* opCtx, const NamespaceString& ns) override;
- Status getDatabasesForShard(OperationContext* txn,
+ Status getDatabasesForShard(OperationContext* opCtx,
const ShardId& shardName,
std::vector<std::string>* dbs) override;
- Status getChunks(OperationContext* txn,
+ Status getChunks(OperationContext* opCtx,
const BSONObj& query,
const BSONObj& sort,
boost::optional<int> limit,
@@ -119,25 +119,25 @@ public:
repl::OpTime* opTime,
repl::ReadConcernLevel readConcern) override;
- Status getTagsForCollection(OperationContext* txn,
+ Status getTagsForCollection(OperationContext* opCtx,
const std::string& collectionNs,
std::vector<TagsType>* tags) override;
StatusWith<repl::OpTimeWith<std::vector<ShardType>>> getAllShards(
- OperationContext* txn, repl::ReadConcernLevel readConcern) override;
+ OperationContext* opCtx, repl::ReadConcernLevel readConcern) override;
- bool runUserManagementWriteCommand(OperationContext* txn,
+ bool runUserManagementWriteCommand(OperationContext* opCtx,
const std::string& commandName,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder* result) override;
- bool runUserManagementReadCommand(OperationContext* txn,
+ bool runUserManagementReadCommand(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder* result) override;
- Status applyChunkOpsDeprecated(OperationContext* txn,
+ Status applyChunkOpsDeprecated(OperationContext* opCtx,
const BSONArray& updateOps,
const BSONArray& preCondition,
const std::string& nss,
@@ -145,42 +145,42 @@ public:
const WriteConcernOptions& writeConcern,
repl::ReadConcernLevel readConcern) override;
- StatusWith<BSONObj> getGlobalSettings(OperationContext* txn, StringData key) override;
+ StatusWith<BSONObj> getGlobalSettings(OperationContext* opCtx, StringData key) override;
- StatusWith<VersionType> getConfigVersion(OperationContext* txn,
+ StatusWith<VersionType> getConfigVersion(OperationContext* opCtx,
repl::ReadConcernLevel readConcern) override;
- void writeConfigServerDirect(OperationContext* txn,
+ void writeConfigServerDirect(OperationContext* opCtx,
const BatchedCommandRequest& request,
BatchedCommandResponse* response) override;
- Status insertConfigDocument(OperationContext* txn,
+ Status insertConfigDocument(OperationContext* opCtx,
const std::string& ns,
const BSONObj& doc,
const WriteConcernOptions& writeConcern) override;
- StatusWith<bool> updateConfigDocument(OperationContext* txn,
+ StatusWith<bool> updateConfigDocument(OperationContext* opCtx,
const std::string& ns,
const BSONObj& query,
const BSONObj& update,
bool upsert,
const WriteConcernOptions& writeConcern) override;
- Status removeConfigDocuments(OperationContext* txn,
+ Status removeConfigDocuments(OperationContext* opCtx,
const std::string& ns,
const BSONObj& query,
const WriteConcernOptions& writeConcern) override;
DistLockManager* getDistLockManager() override;
- Status appendInfoForConfigServerDatabases(OperationContext* txn,
+ Status appendInfoForConfigServerDatabases(OperationContext* opCtx,
const BSONObj& listDatabasesCmd,
BSONArrayBuilder* builder) override;
/**
* Runs a read command against the config server with majority read concern.
*/
- bool runReadCommandForTest(OperationContext* txn,
+ bool runReadCommandForTest(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder* result);
@@ -190,7 +190,7 @@ private:
* Selects an optimal shard on which to place a newly created database from the set of
* available shards. Will return ShardNotFound if shard could not be found.
*/
- static StatusWith<ShardId> _selectShardForNewDatabase(OperationContext* txn,
+ static StatusWith<ShardId> _selectShardForNewDatabase(OperationContext* opCtx,
ShardRegistry* shardRegistry);
/**
@@ -203,12 +203,14 @@ private:
* NamespaceExists if it exists with the same casing
* DatabaseDifferCase if it exists under different casing.
*/
- Status _checkDbDoesNotExist(OperationContext* txn, const std::string& dbName, DatabaseType* db);
+ Status _checkDbDoesNotExist(OperationContext* opCtx,
+ const std::string& dbName,
+ DatabaseType* db);
/**
* Creates the specified collection name in the config database.
*/
- Status _createCappedConfigCollection(OperationContext* txn,
+ Status _createCappedConfigCollection(OperationContext* opCtx,
StringData collName,
int cappedSize,
const WriteConcernOptions& writeConcern);
@@ -217,12 +219,12 @@ private:
* Helper method for running a count command against the config server with appropriate
* error handling.
*/
- StatusWith<long long> _runCountCommandOnConfig(OperationContext* txn,
+ StatusWith<long long> _runCountCommandOnConfig(OperationContext* opCtx,
const NamespaceString& ns,
BSONObj query);
StatusWith<repl::OpTimeWith<std::vector<BSONObj>>> _exhaustiveFindOnConfig(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
repl::ReadConcernLevel readConcern,
const NamespaceString& nss,
@@ -240,21 +242,21 @@ private:
* given read preference. Returns NamespaceNotFound if no database metadata is found.
*/
StatusWith<repl::OpTimeWith<DatabaseType>> _fetchDatabaseMetadata(
- OperationContext* txn, const std::string& dbName, const ReadPreferenceSetting& readPref);
+ OperationContext* opCtx, const std::string& dbName, const ReadPreferenceSetting& readPref);
/**
* Best effort method, which logs diagnostic events on the config server. If the config server
* write fails for any reason a warning will be written to the local service log and the method
* will return a failed status.
*
- * @param txn Operation context in which the call is running
+ * @param opCtx Operation context in which the call is running
* @param logCollName Which config collection to write to (excluding the database name)
* @param what E.g. "split", "migrate" (not interpreted)
* @param operationNS To which collection the metadata change is being applied (not interpreted)
* @param detail Additional info about the metadata change (not interpreted)
* @param writeConcern Write concern options to use for logging
*/
- Status _log(OperationContext* txn,
+ Status _log(OperationContext* opCtx,
const StringData& logCollName,
const std::string& what,
const std::string& operationNS,
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
index 11e03fb4c70..730a411af29 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
@@ -56,17 +56,18 @@ Status ShardingCatalogClientMock::startup() {
return Status::OK();
}
-void ShardingCatalogClientMock::shutDown(OperationContext* txn) {
+void ShardingCatalogClientMock::shutDown(OperationContext* opCtx) {
if (_distLockManager) {
- _distLockManager->shutDown(txn);
+ _distLockManager->shutDown(opCtx);
}
}
-Status ShardingCatalogClientMock::enableSharding(OperationContext* txn, const std::string& dbName) {
+Status ShardingCatalogClientMock::enableSharding(OperationContext* opCtx,
+ const std::string& dbName) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::shardCollection(OperationContext* txn,
+Status ShardingCatalogClientMock::shardCollection(OperationContext* opCtx,
const string& ns,
const ShardKeyPattern& fieldsAndOrder,
const BSONObj& defaultCollation,
@@ -76,51 +77,52 @@ Status ShardingCatalogClientMock::shardCollection(OperationContext* txn,
return {ErrorCodes::InternalError, "Method not implemented"};
}
-StatusWith<ShardDrainingStatus> ShardingCatalogClientMock::removeShard(OperationContext* txn,
+StatusWith<ShardDrainingStatus> ShardingCatalogClientMock::removeShard(OperationContext* opCtx,
const ShardId& name) {
return ShardDrainingStatus::COMPLETED;
}
-Status ShardingCatalogClientMock::updateDatabase(OperationContext* txn,
+Status ShardingCatalogClientMock::updateDatabase(OperationContext* opCtx,
const string& dbName,
const DatabaseType& db) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientMock::getDatabase(
- OperationContext* txn, const string& dbName) {
+ OperationContext* opCtx, const string& dbName) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::updateCollection(OperationContext* txn,
+Status ShardingCatalogClientMock::updateCollection(OperationContext* opCtx,
const string& collNs,
const CollectionType& coll) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
StatusWith<repl::OpTimeWith<CollectionType>> ShardingCatalogClientMock::getCollection(
- OperationContext* txn, const string& collNs) {
+ OperationContext* opCtx, const string& collNs) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::getCollections(OperationContext* txn,
+Status ShardingCatalogClientMock::getCollections(OperationContext* opCtx,
const string* dbName,
vector<CollectionType>* collections,
repl::OpTime* optime) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::dropCollection(OperationContext* txn, const NamespaceString& ns) {
+Status ShardingCatalogClientMock::dropCollection(OperationContext* opCtx,
+ const NamespaceString& ns) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::getDatabasesForShard(OperationContext* txn,
+Status ShardingCatalogClientMock::getDatabasesForShard(OperationContext* opCtx,
const ShardId& shardName,
vector<string>* dbs) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::getChunks(OperationContext* txn,
+Status ShardingCatalogClientMock::getChunks(OperationContext* opCtx,
const BSONObj& filter,
const BSONObj& sort,
boost::optional<int> limit,
@@ -130,18 +132,18 @@ Status ShardingCatalogClientMock::getChunks(OperationContext* txn,
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::getTagsForCollection(OperationContext* txn,
+Status ShardingCatalogClientMock::getTagsForCollection(OperationContext* opCtx,
const string& collectionNs,
vector<TagsType>* tags) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
StatusWith<repl::OpTimeWith<std::vector<ShardType>>> ShardingCatalogClientMock::getAllShards(
- OperationContext* txn, repl::ReadConcernLevel readConcern) {
+ OperationContext* opCtx, repl::ReadConcernLevel readConcern) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
-bool ShardingCatalogClientMock::runUserManagementWriteCommand(OperationContext* txn,
+bool ShardingCatalogClientMock::runUserManagementWriteCommand(OperationContext* opCtx,
const string& commandName,
const string& dbname,
const BSONObj& cmdObj,
@@ -149,14 +151,14 @@ bool ShardingCatalogClientMock::runUserManagementWriteCommand(OperationContext*
return true;
}
-bool ShardingCatalogClientMock::runUserManagementReadCommand(OperationContext* txn,
+bool ShardingCatalogClientMock::runUserManagementReadCommand(OperationContext* opCtx,
const string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder* result) {
return true;
}
-Status ShardingCatalogClientMock::applyChunkOpsDeprecated(OperationContext* txn,
+Status ShardingCatalogClientMock::applyChunkOpsDeprecated(OperationContext* opCtx,
const BSONArray& updateOps,
const BSONArray& preCondition,
const std::string& nss,
@@ -166,14 +168,14 @@ Status ShardingCatalogClientMock::applyChunkOpsDeprecated(OperationContext* txn,
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::logAction(OperationContext* txn,
+Status ShardingCatalogClientMock::logAction(OperationContext* opCtx,
const std::string& what,
const std::string& ns,
const BSONObj& detail) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::logChange(OperationContext* txn,
+Status ShardingCatalogClientMock::logChange(OperationContext* opCtx,
const string& what,
const string& ns,
const BSONObj& detail,
@@ -181,21 +183,21 @@ Status ShardingCatalogClientMock::logChange(OperationContext* txn,
return {ErrorCodes::InternalError, "Method not implemented"};
}
-StatusWith<BSONObj> ShardingCatalogClientMock::getGlobalSettings(OperationContext* txn,
+StatusWith<BSONObj> ShardingCatalogClientMock::getGlobalSettings(OperationContext* opCtx,
StringData key) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
StatusWith<VersionType> ShardingCatalogClientMock::getConfigVersion(
- OperationContext* txn, repl::ReadConcernLevel readConcern) {
+ OperationContext* opCtx, repl::ReadConcernLevel readConcern) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
-void ShardingCatalogClientMock::writeConfigServerDirect(OperationContext* txn,
+void ShardingCatalogClientMock::writeConfigServerDirect(OperationContext* opCtx,
const BatchedCommandRequest& request,
BatchedCommandResponse* response) {}
-Status ShardingCatalogClientMock::insertConfigDocument(OperationContext* txn,
+Status ShardingCatalogClientMock::insertConfigDocument(OperationContext* opCtx,
const std::string& ns,
const BSONObj& doc,
const WriteConcernOptions& writeConcern) {
@@ -203,7 +205,7 @@ Status ShardingCatalogClientMock::insertConfigDocument(OperationContext* txn,
}
StatusWith<bool> ShardingCatalogClientMock::updateConfigDocument(
- OperationContext* txn,
+ OperationContext* opCtx,
const std::string& ns,
const BSONObj& query,
const BSONObj& update,
@@ -212,14 +214,15 @@ StatusWith<bool> ShardingCatalogClientMock::updateConfigDocument(
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::removeConfigDocuments(OperationContext* txn,
+Status ShardingCatalogClientMock::removeConfigDocuments(OperationContext* opCtx,
const std::string& ns,
const BSONObj& query,
const WriteConcernOptions& writeConcern) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::createDatabase(OperationContext* txn, const std::string& dbName) {
+Status ShardingCatalogClientMock::createDatabase(OperationContext* opCtx,
+ const std::string& dbName) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
@@ -228,7 +231,7 @@ DistLockManager* ShardingCatalogClientMock::getDistLockManager() {
}
Status ShardingCatalogClientMock::appendInfoForConfigServerDatabases(
- OperationContext* txn, const BSONObj& listDatabasesCmd, BSONArrayBuilder* builder) {
+ OperationContext* opCtx, const BSONObj& listDatabasesCmd, BSONArrayBuilder* builder) {
return Status::OK();
}
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.h b/src/mongo/s/catalog/sharding_catalog_client_mock.h
index 658681fd37a..a2d223f2384 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.h
@@ -43,11 +43,11 @@ public:
Status startup() override;
- void shutDown(OperationContext* txn) override;
+ void shutDown(OperationContext* opCtx) override;
- Status enableSharding(OperationContext* txn, const std::string& dbName);
+ Status enableSharding(OperationContext* opCtx, const std::string& dbName);
- Status shardCollection(OperationContext* txn,
+ Status shardCollection(OperationContext* opCtx,
const std::string& ns,
const ShardKeyPattern& fieldsAndOrder,
const BSONObj& defaultCollation,
@@ -55,35 +55,35 @@ public:
const std::vector<BSONObj>& initPoints,
const std::set<ShardId>& initShardIds) override;
- StatusWith<ShardDrainingStatus> removeShard(OperationContext* txn,
+ StatusWith<ShardDrainingStatus> removeShard(OperationContext* opCtx,
const ShardId& name) override;
- Status updateDatabase(OperationContext* txn,
+ Status updateDatabase(OperationContext* opCtx,
const std::string& dbName,
const DatabaseType& db) override;
- StatusWith<repl::OpTimeWith<DatabaseType>> getDatabase(OperationContext* txn,
+ StatusWith<repl::OpTimeWith<DatabaseType>> getDatabase(OperationContext* opCtx,
const std::string& dbName) override;
- Status updateCollection(OperationContext* txn,
+ Status updateCollection(OperationContext* opCtx,
const std::string& collNs,
const CollectionType& coll) override;
- StatusWith<repl::OpTimeWith<CollectionType>> getCollection(OperationContext* txn,
+ StatusWith<repl::OpTimeWith<CollectionType>> getCollection(OperationContext* opCtx,
const std::string& collNs) override;
- Status getCollections(OperationContext* txn,
+ Status getCollections(OperationContext* opCtx,
const std::string* dbName,
std::vector<CollectionType>* collections,
repl::OpTime* optime) override;
- Status dropCollection(OperationContext* txn, const NamespaceString& ns) override;
+ Status dropCollection(OperationContext* opCtx, const NamespaceString& ns) override;
- Status getDatabasesForShard(OperationContext* txn,
+ Status getDatabasesForShard(OperationContext* opCtx,
const ShardId& shardName,
std::vector<std::string>* dbs) override;
- Status getChunks(OperationContext* txn,
+ Status getChunks(OperationContext* opCtx,
const BSONObj& filter,
const BSONObj& sort,
boost::optional<int> limit,
@@ -91,25 +91,25 @@ public:
repl::OpTime* opTime,
repl::ReadConcernLevel readConcern) override;
- Status getTagsForCollection(OperationContext* txn,
+ Status getTagsForCollection(OperationContext* opCtx,
const std::string& collectionNs,
std::vector<TagsType>* tags) override;
StatusWith<repl::OpTimeWith<std::vector<ShardType>>> getAllShards(
- OperationContext* txn, repl::ReadConcernLevel readConcern) override;
+ OperationContext* opCtx, repl::ReadConcernLevel readConcern) override;
- bool runUserManagementWriteCommand(OperationContext* txn,
+ bool runUserManagementWriteCommand(OperationContext* opCtx,
const std::string& commandName,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder* result) override;
- bool runUserManagementReadCommand(OperationContext* txn,
+ bool runUserManagementReadCommand(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder* result) override;
- Status applyChunkOpsDeprecated(OperationContext* txn,
+ Status applyChunkOpsDeprecated(OperationContext* opCtx,
const BSONArray& updateOps,
const BSONArray& preCondition,
const std::string& nss,
@@ -117,48 +117,48 @@ public:
const WriteConcernOptions& writeConcern,
repl::ReadConcernLevel readConcern) override;
- Status logAction(OperationContext* txn,
+ Status logAction(OperationContext* opCtx,
const std::string& what,
const std::string& ns,
const BSONObj& detail) override;
- Status logChange(OperationContext* txn,
+ Status logChange(OperationContext* opCtx,
const std::string& what,
const std::string& ns,
const BSONObj& detail,
const WriteConcernOptions& writeConcern) override;
- StatusWith<BSONObj> getGlobalSettings(OperationContext* txn, StringData key) override;
+ StatusWith<BSONObj> getGlobalSettings(OperationContext* opCtx, StringData key) override;
- StatusWith<VersionType> getConfigVersion(OperationContext* txn,
+ StatusWith<VersionType> getConfigVersion(OperationContext* opCtx,
repl::ReadConcernLevel readConcern) override;
- void writeConfigServerDirect(OperationContext* txn,
+ void writeConfigServerDirect(OperationContext* opCtx,
const BatchedCommandRequest& request,
BatchedCommandResponse* response) override;
- Status insertConfigDocument(OperationContext* txn,
+ Status insertConfigDocument(OperationContext* opCtx,
const std::string& ns,
const BSONObj& doc,
const WriteConcernOptions& writeConcern) override;
- StatusWith<bool> updateConfigDocument(OperationContext* txn,
+ StatusWith<bool> updateConfigDocument(OperationContext* opCtx,
const std::string& ns,
const BSONObj& query,
const BSONObj& update,
bool upsert,
const WriteConcernOptions& writeConcern) override;
- Status removeConfigDocuments(OperationContext* txn,
+ Status removeConfigDocuments(OperationContext* opCtx,
const std::string& ns,
const BSONObj& query,
const WriteConcernOptions& writeConcern) override;
- Status createDatabase(OperationContext* txn, const std::string& dbName);
+ Status createDatabase(OperationContext* opCtx, const std::string& dbName);
DistLockManager* getDistLockManager() override;
- Status appendInfoForConfigServerDatabases(OperationContext* txn,
+ Status appendInfoForConfigServerDatabases(OperationContext* opCtx,
const BSONObj& listDatabasesCmd,
BSONArrayBuilder* builder) override;
diff --git a/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp b/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
index 99fdcf0ecab..0ce97c5375e 100644
--- a/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
@@ -220,26 +220,26 @@ TEST_F(ConfigInitializationTest, ReRunsIfDocRolledBackThenReElected) {
});
operationContext()->setReplicatedWrites(false);
replicationCoordinator()->setFollowerMode(repl::MemberState::RS_ROLLBACK);
- auto txn = operationContext();
+ auto opCtx = operationContext();
auto nss = NamespaceString(VersionType::ConfigNS);
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetCollection autoColl(txn, nss, MODE_IX);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ AutoGetCollection autoColl(opCtx, nss, MODE_IX);
auto coll = autoColl.getCollection();
ASSERT_TRUE(coll);
- auto cursor = coll->getCursor(txn);
+ auto cursor = coll->getCursor(opCtx);
std::vector<RecordId> recordIds;
while (auto recordId = cursor->next()) {
recordIds.push_back(recordId->id);
}
- mongo::WriteUnitOfWork wuow(txn);
+ mongo::WriteUnitOfWork wuow(opCtx);
for (auto recordId : recordIds) {
- coll->deleteDocument(txn, recordId, nullptr);
+ coll->deleteDocument(opCtx, recordId, nullptr);
}
wuow.commit();
- ASSERT_EQUALS(0UL, coll->numRecords(txn));
+ ASSERT_EQUALS(0UL, coll->numRecords(opCtx));
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "removeConfigDocuments", nss.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "removeConfigDocuments", nss.ns());
}
// Verify the document was actually removed.
diff --git a/src/mongo/s/catalog/sharding_catalog_manager.h b/src/mongo/s/catalog/sharding_catalog_manager.h
index e3cae76b7e3..f2e041ace84 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager.h
+++ b/src/mongo/s/catalog/sharding_catalog_manager.h
@@ -83,7 +83,7 @@ public:
/**
* Performs necessary cleanup when shutting down cleanly.
*/
- virtual void shutDown(OperationContext* txn) = 0;
+ virtual void shutDown(OperationContext* opCtx) = 0;
/**
*
@@ -98,7 +98,7 @@ public:
* no limitation to space usage.
* @return either an !OK status or the name of the newly added shard.
*/
- virtual StatusWith<std::string> addShard(OperationContext* txn,
+ virtual StatusWith<std::string> addShard(OperationContext* opCtx,
const std::string* shardProposedName,
const ConnectionString& shardConnectionString,
const long long maxSize) = 0;
@@ -107,7 +107,7 @@ public:
* Adds the shard to the zone.
* Returns ErrorCodes::ShardNotFound if the shard does not exist.
*/
- virtual Status addShardToZone(OperationContext* txn,
+ virtual Status addShardToZone(OperationContext* opCtx,
const std::string& shardName,
const std::string& zoneName) = 0;
@@ -115,7 +115,7 @@ public:
* Removes the shard from the zone.
* Returns ErrorCodes::ShardNotFound if the shard does not exist.
*/
- virtual Status removeShardFromZone(OperationContext* txn,
+ virtual Status removeShardFromZone(OperationContext* opCtx,
const std::string& shardName,
const std::string& zoneName) = 0;
@@ -124,7 +124,7 @@ public:
* the shard key, the range will be converted into a new range with full shard key filled
* with MinKey values.
*/
- virtual Status assignKeyRangeToZone(OperationContext* txn,
+ virtual Status assignKeyRangeToZone(OperationContext* opCtx,
const NamespaceString& ns,
const ChunkRange& range,
const std::string& zoneName) = 0;
@@ -134,7 +134,7 @@ public:
* Note: unlike assignKeyRangeToZone, the given range will never be converted to include the
* full shard key.
*/
- virtual Status removeKeyRangeFromZone(OperationContext* txn,
+ virtual Status removeKeyRangeFromZone(OperationContext* opCtx,
const NamespaceString& ns,
const ChunkRange& range) = 0;
@@ -142,7 +142,7 @@ public:
* Updates metadata in config.chunks collection to show the given chunk as split
* into smaller chunks at the specified split points.
*/
- virtual Status commitChunkSplit(OperationContext* txn,
+ virtual Status commitChunkSplit(OperationContext* opCtx,
const NamespaceString& ns,
const OID& requestEpoch,
const ChunkRange& range,
@@ -153,7 +153,7 @@ public:
* Updates metadata in config.chunks collection so the chunks with given boundaries are seen
* merged into a single larger chunk.
*/
- virtual Status commitChunkMerge(OperationContext* txn,
+ virtual Status commitChunkMerge(OperationContext* opCtx,
const NamespaceString& ns,
const OID& requestEpoch,
const std::vector<BSONObj>& chunkBoundaries,
@@ -162,7 +162,7 @@ public:
/**
* Updates metadata in config.chunks collection to show the given chunk in its new shard.
*/
- virtual StatusWith<BSONObj> commitChunkMigration(OperationContext* txn,
+ virtual StatusWith<BSONObj> commitChunkMigration(OperationContext* opCtx,
const NamespaceString& nss,
const ChunkType& migratedChunk,
const boost::optional<ChunkType>& controlChunk,
@@ -179,7 +179,7 @@ public:
* Initializes the collections that live in the config server. Mostly this involves building
* necessary indexes and populating the config.version document.
*/
- virtual Status initializeConfigDatabaseIfNeeded(OperationContext* txn) = 0;
+ virtual Status initializeConfigDatabaseIfNeeded(OperationContext* opCtx) = 0;
/**
* Called if the config.version document is rolled back. Indicates to the
@@ -195,13 +195,13 @@ public:
* shardIdentity doc's configsvrConnString if the _id, shardName, and clusterId do not
* conflict).
*/
- virtual BSONObj createShardIdentityUpsertForAddShard(OperationContext* txn,
+ virtual BSONObj createShardIdentityUpsertForAddShard(OperationContext* opCtx,
const std::string& shardName) = 0;
/**
* Runs the setFeatureCompatibilityVersion command on all shards.
*/
- virtual Status setFeatureCompatibilityVersionOnShards(OperationContext* txn,
+ virtual Status setFeatureCompatibilityVersionOnShards(OperationContext* opCtx,
const std::string& version) = 0;
protected:
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp b/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp
index fe3e427f228..c1edeaad1dd 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp
@@ -139,13 +139,13 @@ BSONArray buildMergeChunksApplyOpsPrecond(const std::vector<ChunkType>& chunksTo
* has not been dropped and recreated since the migration began, unbeknown to the shard when the
* command was sent.
*/
-Status checkCollectionVersionEpoch(OperationContext* txn,
+Status checkCollectionVersionEpoch(OperationContext* opCtx,
const NamespaceString& nss,
const ChunkType& aChunk,
const OID& collectionEpoch) {
auto findResponseWith =
- Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
- txn,
+ Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
NamespaceString(ChunkType::ConfigNS),
@@ -192,7 +192,7 @@ Status checkCollectionVersionEpoch(OperationContext* txn,
return Status::OK();
}
-Status checkChunkIsOnShard(OperationContext* txn,
+Status checkChunkIsOnShard(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& min,
const BSONObj& max,
@@ -204,8 +204,8 @@ Status checkChunkIsOnShard(OperationContext* txn,
// Must use local read concern because we're going to perform subsequent writes.
auto findResponseWith =
- Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
- txn,
+ Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
NamespaceString(ChunkType::ConfigNS),
@@ -288,7 +288,7 @@ BSONObj makeCommitChunkApplyOpsCommand(const NamespaceString& nss,
} // namespace
-Status ShardingCatalogManagerImpl::commitChunkSplit(OperationContext* txn,
+Status ShardingCatalogManagerImpl::commitChunkSplit(OperationContext* opCtx,
const NamespaceString& ns,
const OID& requestEpoch,
const ChunkRange& range,
@@ -298,11 +298,11 @@ Status ShardingCatalogManagerImpl::commitChunkSplit(OperationContext* txn,
// migrations
// TODO(SERVER-25359): Replace with a collection-specific lock map to allow splits/merges/
// move chunks on different collections to proceed in parallel
- Lock::ExclusiveLock lk(txn->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock);
// Get the chunk with highest version for this namespace
- auto findStatus = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
- txn,
+ auto findStatus = Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
NamespaceString(ChunkType::ConfigNS),
@@ -429,8 +429,8 @@ Status ShardingCatalogManagerImpl::commitChunkSplit(OperationContext* txn,
}
// apply the batch of updates to remote and local metadata
- Status applyOpsStatus = Grid::get(txn)->catalogClient(txn)->applyChunkOpsDeprecated(
- txn,
+ Status applyOpsStatus = Grid::get(opCtx)->catalogClient(opCtx)->applyChunkOpsDeprecated(
+ opCtx,
updates.arr(),
preCond.arr(),
ns.ns(),
@@ -454,8 +454,8 @@ Status ShardingCatalogManagerImpl::commitChunkSplit(OperationContext* txn,
appendShortVersion(&logDetail.subobjStart("left"), newChunks[0]);
appendShortVersion(&logDetail.subobjStart("right"), newChunks[1]);
- Grid::get(txn)->catalogClient(txn)->logChange(
- txn, "split", ns.ns(), logDetail.obj(), WriteConcernOptions());
+ Grid::get(opCtx)->catalogClient(opCtx)->logChange(
+ opCtx, "split", ns.ns(), logDetail.obj(), WriteConcernOptions());
} else {
BSONObj beforeDetailObj = logDetail.obj();
BSONObj firstDetailObj = beforeDetailObj.getOwned();
@@ -468,15 +468,15 @@ Status ShardingCatalogManagerImpl::commitChunkSplit(OperationContext* txn,
chunkDetail.append("of", newChunksSize);
appendShortVersion(&chunkDetail.subobjStart("chunk"), newChunks[i]);
- Grid::get(txn)->catalogClient(txn)->logChange(
- txn, "multi-split", ns.ns(), chunkDetail.obj(), WriteConcernOptions());
+ Grid::get(opCtx)->catalogClient(opCtx)->logChange(
+ opCtx, "multi-split", ns.ns(), chunkDetail.obj(), WriteConcernOptions());
}
}
return applyOpsStatus;
}
-Status ShardingCatalogManagerImpl::commitChunkMerge(OperationContext* txn,
+Status ShardingCatalogManagerImpl::commitChunkMerge(OperationContext* opCtx,
const NamespaceString& ns,
const OID& requestEpoch,
const std::vector<BSONObj>& chunkBoundaries,
@@ -488,11 +488,11 @@ Status ShardingCatalogManagerImpl::commitChunkMerge(OperationContext* txn,
// migrations
// TODO(SERVER-25359): Replace with a collection-specific lock map to allow splits/merges/
// move chunks on different collections to proceed in parallel
- Lock::ExclusiveLock lk(txn->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock);
// Get the chunk with the highest version for this namespace
- auto findStatus = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
- txn,
+ auto findStatus = Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
NamespaceString(ChunkType::ConfigNS),
@@ -554,8 +554,8 @@ Status ShardingCatalogManagerImpl::commitChunkMerge(OperationContext* txn,
auto preCond = buildMergeChunksApplyOpsPrecond(chunksToMerge, collVersion);
// apply the batch of updates to remote and local metadata
- Status applyOpsStatus = Grid::get(txn)->catalogClient(txn)->applyChunkOpsDeprecated(
- txn,
+ Status applyOpsStatus = Grid::get(opCtx)->catalogClient(opCtx)->applyChunkOpsDeprecated(
+ opCtx,
updates,
preCond,
ns.ns(),
@@ -577,14 +577,14 @@ Status ShardingCatalogManagerImpl::commitChunkMerge(OperationContext* txn,
collVersion.addToBSON(logDetail, "prevShardVersion");
mergeVersion.addToBSON(logDetail, "mergedVersion");
- Grid::get(txn)->catalogClient(txn)->logChange(
- txn, "merge", ns.ns(), logDetail.obj(), WriteConcernOptions());
+ Grid::get(opCtx)->catalogClient(opCtx)->logChange(
+ opCtx, "merge", ns.ns(), logDetail.obj(), WriteConcernOptions());
return applyOpsStatus;
}
StatusWith<BSONObj> ShardingCatalogManagerImpl::commitChunkMigration(
- OperationContext* txn,
+ OperationContext* opCtx,
const NamespaceString& nss,
const ChunkType& migratedChunk,
const boost::optional<ChunkType>& controlChunk,
@@ -602,11 +602,11 @@ StatusWith<BSONObj> ShardingCatalogManagerImpl::commitChunkMigration(
// TODO(SERVER-25359): Replace with a collection-specific lock map to allow splits/merges/
// move chunks on different collections to proceed in parallel.
// (Note: This is not needed while we have a global lock, taken here only for consistency.)
- Lock::ExclusiveLock lk(txn->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock);
// Ensure that the epoch passed in still matches the real state of the database.
- auto epochCheck = checkCollectionVersionEpoch(txn, nss, migratedChunk, collectionEpoch);
+ auto epochCheck = checkCollectionVersionEpoch(opCtx, nss, migratedChunk, collectionEpoch);
if (!epochCheck.isOK()) {
return epochCheck;
}
@@ -614,22 +614,22 @@ StatusWith<BSONObj> ShardingCatalogManagerImpl::commitChunkMigration(
// Check that migratedChunk and controlChunk are where they should be, on fromShard.
auto migratedOnShard =
- checkChunkIsOnShard(txn, nss, migratedChunk.getMin(), migratedChunk.getMax(), fromShard);
+ checkChunkIsOnShard(opCtx, nss, migratedChunk.getMin(), migratedChunk.getMax(), fromShard);
if (!migratedOnShard.isOK()) {
return migratedOnShard;
}
if (controlChunk) {
auto controlOnShard = checkChunkIsOnShard(
- txn, nss, controlChunk->getMin(), controlChunk->getMax(), fromShard);
+ opCtx, nss, controlChunk->getMin(), controlChunk->getMax(), fromShard);
if (!controlOnShard.isOK()) {
return controlOnShard;
}
}
// Must use local read concern because we will perform subsequent writes.
- auto findResponse = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
- txn,
+ auto findResponse = Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
NamespaceString(ChunkType::ConfigNS),
@@ -671,8 +671,8 @@ StatusWith<BSONObj> ShardingCatalogManagerImpl::commitChunkMigration(
nss, newMigratedChunk, newControlChunk, fromShard.toString(), toShard.toString());
StatusWith<Shard::CommandResponse> applyOpsCommandResponse =
- Grid::get(txn)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
nss.db().toString(),
command,
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_impl.cpp b/src/mongo/s/catalog/sharding_catalog_manager_impl.cpp
index 6c3e9ac749c..ca2245f540b 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_impl.cpp
@@ -75,7 +75,7 @@ Status ShardingCatalogManagerImpl::startup() {
return Status::OK();
}
-void ShardingCatalogManagerImpl::shutDown(OperationContext* txn) {
+void ShardingCatalogManagerImpl::shutDown(OperationContext* opCtx) {
LOG(1) << "ShardingCatalogManagerImpl::shutDown() called.";
{
stdx::lock_guard<stdx::mutex> lk(_mutex);
@@ -86,7 +86,7 @@ void ShardingCatalogManagerImpl::shutDown(OperationContext* txn) {
_executorForAddShard->join();
}
-Status ShardingCatalogManagerImpl::initializeConfigDatabaseIfNeeded(OperationContext* txn) {
+Status ShardingCatalogManagerImpl::initializeConfigDatabaseIfNeeded(OperationContext* opCtx) {
{
stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_configInitialized) {
@@ -95,7 +95,7 @@ Status ShardingCatalogManagerImpl::initializeConfigDatabaseIfNeeded(OperationCon
}
}
- Status status = _initConfigIndexes(txn);
+ Status status = _initConfigIndexes(opCtx);
if (!status.isOK()) {
return status;
}
@@ -103,7 +103,7 @@ Status ShardingCatalogManagerImpl::initializeConfigDatabaseIfNeeded(OperationCon
// Make sure to write config.version last since we detect rollbacks of config.version and
// will re-run initializeConfigDatabaseIfNeeded if that happens, but we don't detect rollback
// of the index builds.
- status = _initConfigVersion(txn);
+ status = _initConfigVersion(opCtx);
if (!status.isOK()) {
return status;
}
@@ -119,11 +119,11 @@ void ShardingCatalogManagerImpl::discardCachedConfigDatabaseInitializationState(
_configInitialized = false;
}
-Status ShardingCatalogManagerImpl::_initConfigVersion(OperationContext* txn) {
- const auto catalogClient = Grid::get(txn)->catalogClient(txn);
+Status ShardingCatalogManagerImpl::_initConfigVersion(OperationContext* opCtx) {
+ const auto catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
auto versionStatus =
- catalogClient->getConfigVersion(txn, repl::ReadConcernLevel::kLocalReadConcern);
+ catalogClient->getConfigVersion(opCtx, repl::ReadConcernLevel::kLocalReadConcern);
if (!versionStatus.isOK()) {
return versionStatus.getStatus();
}
@@ -144,7 +144,7 @@ Status ShardingCatalogManagerImpl::_initConfigVersion(OperationContext* txn) {
BSONObj versionObj(newVersion.toBSON());
auto insertStatus = catalogClient->insertConfigDocument(
- txn, VersionType::ConfigNS, versionObj, kNoWaitWriteConcern);
+ opCtx, VersionType::ConfigNS, versionObj, kNoWaitWriteConcern);
return insertStatus;
}
@@ -168,12 +168,12 @@ Status ShardingCatalogManagerImpl::_initConfigVersion(OperationContext* txn) {
return Status::OK();
}
-Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
+Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* opCtx) {
const bool unique = true;
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
Status result =
- configShard->createIndexOnConfig(txn,
+ configShard->createIndexOnConfig(opCtx,
NamespaceString(ChunkType::ConfigNS),
BSON(ChunkType::ns() << 1 << ChunkType::min() << 1),
unique);
@@ -184,7 +184,7 @@ Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
}
result = configShard->createIndexOnConfig(
- txn,
+ opCtx,
NamespaceString(ChunkType::ConfigNS),
BSON(ChunkType::ns() << 1 << ChunkType::shard() << 1 << ChunkType::min() << 1),
unique);
@@ -195,7 +195,7 @@ Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
}
result = configShard->createIndexOnConfig(
- txn,
+ opCtx,
NamespaceString(ChunkType::ConfigNS),
BSON(ChunkType::ns() << 1 << ChunkType::DEPRECATED_lastmod() << 1),
unique);
@@ -206,7 +206,7 @@ Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
}
result = configShard->createIndexOnConfig(
- txn,
+ opCtx,
NamespaceString(MigrationType::ConfigNS),
BSON(MigrationType::ns() << 1 << MigrationType::min() << 1),
unique);
@@ -217,7 +217,7 @@ Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
}
result = configShard->createIndexOnConfig(
- txn, NamespaceString(ShardType::ConfigNS), BSON(ShardType::host() << 1), unique);
+ opCtx, NamespaceString(ShardType::ConfigNS), BSON(ShardType::host() << 1), unique);
if (!result.isOK()) {
return Status(result.code(),
str::stream() << "couldn't create host_1 index on config db"
@@ -225,7 +225,7 @@ Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
}
result = configShard->createIndexOnConfig(
- txn, NamespaceString(LocksType::ConfigNS), BSON(LocksType::lockID() << 1), !unique);
+ opCtx, NamespaceString(LocksType::ConfigNS), BSON(LocksType::lockID() << 1), !unique);
if (!result.isOK()) {
return Status(result.code(),
str::stream() << "couldn't create lock id index on config db"
@@ -233,7 +233,7 @@ Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
}
result =
- configShard->createIndexOnConfig(txn,
+ configShard->createIndexOnConfig(opCtx,
NamespaceString(LocksType::ConfigNS),
BSON(LocksType::state() << 1 << LocksType::process() << 1),
!unique);
@@ -244,14 +244,14 @@ Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
}
result = configShard->createIndexOnConfig(
- txn, NamespaceString(LockpingsType::ConfigNS), BSON(LockpingsType::ping() << 1), !unique);
+ opCtx, NamespaceString(LockpingsType::ConfigNS), BSON(LockpingsType::ping() << 1), !unique);
if (!result.isOK()) {
return Status(result.code(),
str::stream() << "couldn't create lockping ping time index on config db"
<< causedBy(result));
}
- result = configShard->createIndexOnConfig(txn,
+ result = configShard->createIndexOnConfig(opCtx,
NamespaceString(TagsType::ConfigNS),
BSON(TagsType::ns() << 1 << TagsType::min() << 1),
unique);
@@ -261,7 +261,7 @@ Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
<< causedBy(result));
}
- result = configShard->createIndexOnConfig(txn,
+ result = configShard->createIndexOnConfig(opCtx,
NamespaceString(TagsType::ConfigNS),
BSON(TagsType::ns() << 1 << TagsType::tag() << 1),
!unique);
@@ -275,22 +275,22 @@ Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
}
Status ShardingCatalogManagerImpl::setFeatureCompatibilityVersionOnShards(
- OperationContext* txn, const std::string& version) {
+ OperationContext* opCtx, const std::string& version) {
// No shards should be added until we have forwarded featureCompatibilityVersion to all shards.
- Lock::SharedLock lk(txn->lockState(), _kShardMembershipLock);
+ Lock::SharedLock lk(opCtx->lockState(), _kShardMembershipLock);
std::vector<ShardId> shardIds;
- Grid::get(txn)->shardRegistry()->getAllShardIds(&shardIds);
+ Grid::get(opCtx)->shardRegistry()->getAllShardIds(&shardIds);
for (const ShardId& shardId : shardIds) {
- const auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ const auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
continue;
}
const auto shard = shardStatus.getValue();
auto response = shard->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"admin",
BSON(FeatureCompatibilityVersion::kCommandName << version),
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_impl.h b/src/mongo/s/catalog/sharding_catalog_manager_impl.h
index 2b4cdd818a8..37207f81383 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_impl.h
+++ b/src/mongo/s/catalog/sharding_catalog_manager_impl.h
@@ -52,43 +52,43 @@ public:
*/
Status startup() override;
- void shutDown(OperationContext* txn) override;
+ void shutDown(OperationContext* opCtx) override;
- Status initializeConfigDatabaseIfNeeded(OperationContext* txn) override;
+ Status initializeConfigDatabaseIfNeeded(OperationContext* opCtx) override;
void discardCachedConfigDatabaseInitializationState() override;
- Status addShardToZone(OperationContext* txn,
+ Status addShardToZone(OperationContext* opCtx,
const std::string& shardName,
const std::string& zoneName) override;
- Status removeShardFromZone(OperationContext* txn,
+ Status removeShardFromZone(OperationContext* opCtx,
const std::string& shardName,
const std::string& zoneName) override;
- Status assignKeyRangeToZone(OperationContext* txn,
+ Status assignKeyRangeToZone(OperationContext* opCtx,
const NamespaceString& ns,
const ChunkRange& range,
const std::string& zoneName) override;
- Status removeKeyRangeFromZone(OperationContext* txn,
+ Status removeKeyRangeFromZone(OperationContext* opCtx,
const NamespaceString& ns,
const ChunkRange& range) override;
- Status commitChunkSplit(OperationContext* txn,
+ Status commitChunkSplit(OperationContext* opCtx,
const NamespaceString& ns,
const OID& requestEpoch,
const ChunkRange& range,
const std::vector<BSONObj>& splitPoints,
const std::string& shardName) override;
- Status commitChunkMerge(OperationContext* txn,
+ Status commitChunkMerge(OperationContext* opCtx,
const NamespaceString& ns,
const OID& requestEpoch,
const std::vector<BSONObj>& chunkBoundaries,
const std::string& shardName) override;
- StatusWith<BSONObj> commitChunkMigration(OperationContext* txn,
+ StatusWith<BSONObj> commitChunkMigration(OperationContext* opCtx,
const NamespaceString& nss,
const ChunkType& migratedChunk,
const boost::optional<ChunkType>& controlChunk,
@@ -98,15 +98,15 @@ public:
void appendConnectionStats(executor::ConnectionPoolStats* stats) override;
- StatusWith<std::string> addShard(OperationContext* txn,
+ StatusWith<std::string> addShard(OperationContext* opCtx,
const std::string* shardProposedName,
const ConnectionString& shardConnectionString,
const long long maxSize) override;
- BSONObj createShardIdentityUpsertForAddShard(OperationContext* txn,
+ BSONObj createShardIdentityUpsertForAddShard(OperationContext* opCtx,
const std::string& shardName) override;
- Status setFeatureCompatibilityVersionOnShards(OperationContext* txn,
+ Status setFeatureCompatibilityVersionOnShards(OperationContext* opCtx,
const std::string& version) override;
private:
@@ -114,12 +114,12 @@ private:
* Performs the necessary checks for version compatibility and creates a new config.version
* document if the current cluster config is empty.
*/
- Status _initConfigVersion(OperationContext* txn);
+ Status _initConfigVersion(OperationContext* opCtx);
/**
* Builds all the expected indexes on the config server.
*/
- Status _initConfigIndexes(OperationContext* txn);
+ Status _initConfigIndexes(OperationContext* opCtx);
/**
* Used during addShard to determine if there is already an existing shard that matches the
@@ -132,7 +132,7 @@ private:
* options, so the addShard attempt must be aborted.
*/
StatusWith<boost::optional<ShardType>> _checkIfShardExists(
- OperationContext* txn,
+ OperationContext* opCtx,
const ConnectionString& propsedShardConnectionString,
const std::string* shardProposedName,
long long maxSize);
@@ -153,7 +153,7 @@ private:
* shard's name should be checked and if empty, one should be generated using some uniform
* algorithm.
*/
- StatusWith<ShardType> _validateHostAsShard(OperationContext* txn,
+ StatusWith<ShardType> _validateHostAsShard(OperationContext* opCtx,
std::shared_ptr<RemoteCommandTargeter> targeter,
const std::string* shardProposedName,
const ConnectionString& connectionString);
@@ -164,13 +164,13 @@ private:
* purposes.
*/
StatusWith<std::vector<std::string>> _getDBNamesListFromShard(
- OperationContext* txn, std::shared_ptr<RemoteCommandTargeter> targeter);
+ OperationContext* opCtx, std::shared_ptr<RemoteCommandTargeter> targeter);
/**
* Runs a command against a "shard" that is not yet in the cluster and thus not present in the
* ShardRegistry.
*/
- StatusWith<Shard::CommandResponse> _runCommandForAddShard(OperationContext* txn,
+ StatusWith<Shard::CommandResponse> _runCommandForAddShard(OperationContext* opCtx,
RemoteCommandTargeter* targeter,
const std::string& dbName,
const BSONObj& cmdObj);
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp b/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp
index b65ba11f531..82b006780b3 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp
@@ -87,12 +87,12 @@ MONGO_FP_DECLARE(dontUpsertShardIdentityOnNewShards);
/**
* Generates a unique name to be given to a newly added shard.
*/
-StatusWith<std::string> generateNewShardName(OperationContext* txn) {
+StatusWith<std::string> generateNewShardName(OperationContext* opCtx) {
BSONObjBuilder shardNameRegex;
shardNameRegex.appendRegex(ShardType::name(), "^shard");
- auto findStatus = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
- txn,
+ auto findStatus = Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ opCtx,
kConfigReadSelector,
repl::ReadConcernLevel::kMajorityReadConcern,
NamespaceString(ShardType::ConfigNS),
@@ -130,11 +130,11 @@ StatusWith<std::string> generateNewShardName(OperationContext* txn) {
} // namespace
StatusWith<Shard::CommandResponse> ShardingCatalogManagerImpl::_runCommandForAddShard(
- OperationContext* txn,
+ OperationContext* opCtx,
RemoteCommandTargeter* targeter,
const std::string& dbName,
const BSONObj& cmdObj) {
- auto host = targeter->findHost(txn, ReadPreferenceSetting{ReadPreference::PrimaryOnly});
+ auto host = targeter->findHost(opCtx, ReadPreferenceSetting{ReadPreference::PrimaryOnly});
if (!host.isOK()) {
return host.getStatus();
}
@@ -198,13 +198,13 @@ StatusWith<Shard::CommandResponse> ShardingCatalogManagerImpl::_runCommandForAdd
}
StatusWith<boost::optional<ShardType>> ShardingCatalogManagerImpl::_checkIfShardExists(
- OperationContext* txn,
+ OperationContext* opCtx,
const ConnectionString& proposedShardConnectionString,
const std::string* proposedShardName,
long long proposedShardMaxSize) {
// Check whether any host in the connection is already part of the cluster.
- const auto existingShards = Grid::get(txn)->catalogClient(txn)->getAllShards(
- txn, repl::ReadConcernLevel::kLocalReadConcern);
+ const auto existingShards = Grid::get(opCtx)->catalogClient(opCtx)->getAllShards(
+ opCtx, repl::ReadConcernLevel::kLocalReadConcern);
if (!existingShards.isOK()) {
return Status(existingShards.getStatus().code(),
str::stream() << "Failed to load existing shards during addShard"
@@ -293,7 +293,7 @@ StatusWith<boost::optional<ShardType>> ShardingCatalogManagerImpl::_checkIfShard
}
StatusWith<ShardType> ShardingCatalogManagerImpl::_validateHostAsShard(
- OperationContext* txn,
+ OperationContext* opCtx,
std::shared_ptr<RemoteCommandTargeter> targeter,
const std::string* shardProposedName,
const ConnectionString& connectionString) {
@@ -301,7 +301,7 @@ StatusWith<ShardType> ShardingCatalogManagerImpl::_validateHostAsShard(
// Check if the node being added is a mongos or a version of mongod too old to speak the current
// communication protocol.
auto swCommandResponse =
- _runCommandForAddShard(txn, targeter.get(), "admin", BSON("isMaster" << 1));
+ _runCommandForAddShard(opCtx, targeter.get(), "admin", BSON("isMaster" << 1));
if (!swCommandResponse.isOK()) {
if (swCommandResponse.getStatus() == ErrorCodes::RPCProtocolNegotiationFailed) {
// Mongos to mongos commands are no longer supported in the wire protocol
@@ -479,10 +479,10 @@ StatusWith<ShardType> ShardingCatalogManagerImpl::_validateHostAsShard(
}
StatusWith<std::vector<std::string>> ShardingCatalogManagerImpl::_getDBNamesListFromShard(
- OperationContext* txn, std::shared_ptr<RemoteCommandTargeter> targeter) {
+ OperationContext* opCtx, std::shared_ptr<RemoteCommandTargeter> targeter) {
auto swCommandResponse =
- _runCommandForAddShard(txn, targeter.get(), "admin", BSON("listDatabases" << 1));
+ _runCommandForAddShard(opCtx, targeter.get(), "admin", BSON("listDatabases" << 1));
if (!swCommandResponse.isOK()) {
return swCommandResponse.getStatus();
}
@@ -509,7 +509,7 @@ StatusWith<std::vector<std::string>> ShardingCatalogManagerImpl::_getDBNamesList
}
StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
- OperationContext* txn,
+ OperationContext* opCtx,
const std::string* shardProposedName,
const ConnectionString& shardConnectionString,
const long long maxSize) {
@@ -522,12 +522,12 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
}
// Only one addShard operation can be in progress at a time.
- Lock::ExclusiveLock lk(txn->lockState(), _kShardMembershipLock);
+ Lock::ExclusiveLock lk(opCtx->lockState(), _kShardMembershipLock);
// Check if this shard has already been added (can happen in the case of a retry after a network
// error, for example) and thus this addShard request should be considered a no-op.
auto existingShard =
- _checkIfShardExists(txn, shardConnectionString, shardProposedName, maxSize);
+ _checkIfShardExists(opCtx, shardConnectionString, shardProposedName, maxSize);
if (!existingShard.isOK()) {
return existingShard.getStatus();
}
@@ -536,7 +536,7 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
// addShard request. Make sure to set the last optime for the client to the system last
// optime so that we'll still wait for replication so that this state is visible in the
// committed snapshot.
- repl::ReplClientInfo::forClient(txn->getClient()).setLastOpToSystemLastOpTime(txn);
+ repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
return existingShard.getValue()->getName();
}
@@ -547,15 +547,15 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
// Note: This is necessary because as of 3.4, removeShard is performed by mongos (unlike
// addShard), so the ShardRegistry is not synchronously reloaded on the config server when a
// shard is removed.
- if (!Grid::get(txn)->shardRegistry()->reload(txn)) {
+ if (!Grid::get(opCtx)->shardRegistry()->reload(opCtx)) {
// If the first reload joined an existing one, call reload again to ensure the reload is
// fresh.
- Grid::get(txn)->shardRegistry()->reload(txn);
+ Grid::get(opCtx)->shardRegistry()->reload(opCtx);
}
// TODO: Don't create a detached Shard object, create a detached RemoteCommandTargeter instead.
const std::shared_ptr<Shard> shard{
- Grid::get(txn)->shardRegistry()->createConnection(shardConnectionString)};
+ Grid::get(opCtx)->shardRegistry()->createConnection(shardConnectionString)};
invariant(shard);
auto targeter = shard->getTargeter();
@@ -571,20 +571,20 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
// Validate the specified connection string may serve as shard at all
auto shardStatus =
- _validateHostAsShard(txn, targeter, shardProposedName, shardConnectionString);
+ _validateHostAsShard(opCtx, targeter, shardProposedName, shardConnectionString);
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
ShardType& shardType = shardStatus.getValue();
// Check that none of the existing shard candidate's dbs exist already
- auto dbNamesStatus = _getDBNamesListFromShard(txn, targeter);
+ auto dbNamesStatus = _getDBNamesListFromShard(opCtx, targeter);
if (!dbNamesStatus.isOK()) {
return dbNamesStatus.getStatus();
}
for (const auto& dbName : dbNamesStatus.getValue()) {
- auto dbt = Grid::get(txn)->catalogClient(txn)->getDatabase(txn, dbName);
+ auto dbt = Grid::get(opCtx)->catalogClient(opCtx)->getDatabase(opCtx, dbName);
if (dbt.isOK()) {
const auto& dbDoc = dbt.getValue().value;
return Status(ErrorCodes::OperationFailed,
@@ -603,7 +603,7 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
// If a name for a shard wasn't provided, generate one
if (shardType.getName().empty()) {
- auto result = generateNewShardName(txn);
+ auto result = generateNewShardName(opCtx);
if (!result.isOK()) {
return result.getStatus();
}
@@ -619,7 +619,7 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
if (serverGlobalParams.featureCompatibility.version.load() ==
ServerGlobalParams::FeatureCompatibility::Version::k34) {
auto versionResponse =
- _runCommandForAddShard(txn,
+ _runCommandForAddShard(opCtx,
targeter.get(),
"admin",
BSON(FeatureCompatibilityVersion::kCommandName
@@ -640,12 +640,12 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
}
if (!MONGO_FAIL_POINT(dontUpsertShardIdentityOnNewShards)) {
- auto commandRequest = createShardIdentityUpsertForAddShard(txn, shardType.getName());
+ auto commandRequest = createShardIdentityUpsertForAddShard(opCtx, shardType.getName());
LOG(2) << "going to insert shardIdentity document into shard: " << shardType;
auto swCommandResponse =
- _runCommandForAddShard(txn, targeter.get(), "admin", commandRequest);
+ _runCommandForAddShard(opCtx, targeter.get(), "admin", commandRequest);
if (!swCommandResponse.isOK()) {
return swCommandResponse.getStatus();
}
@@ -662,8 +662,11 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
log() << "going to insert new entry for shard into config.shards: " << shardType.toString();
- Status result = Grid::get(txn)->catalogClient(txn)->insertConfigDocument(
- txn, ShardType::ConfigNS, shardType.toBSON(), ShardingCatalogClient::kMajorityWriteConcern);
+ Status result = Grid::get(opCtx)->catalogClient(opCtx)->insertConfigDocument(
+ opCtx,
+ ShardType::ConfigNS,
+ shardType.toBSON(),
+ ShardingCatalogClient::kMajorityWriteConcern);
if (!result.isOK()) {
log() << "error adding shard: " << shardType.toBSON() << " err: " << result.reason();
return result;
@@ -676,7 +679,7 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
dbt.setPrimary(shardType.getName());
dbt.setSharded(false);
- Status status = Grid::get(txn)->catalogClient(txn)->updateDatabase(txn, dbName, dbt);
+ Status status = Grid::get(opCtx)->catalogClient(opCtx)->updateDatabase(opCtx, dbName, dbt);
if (!status.isOK()) {
log() << "adding shard " << shardConnectionString.toString()
<< " even though could not add database " << dbName;
@@ -688,12 +691,12 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
shardDetails.append("name", shardType.getName());
shardDetails.append("host", shardConnectionString.toString());
- Grid::get(txn)->catalogClient(txn)->logChange(
- txn, "addShard", "", shardDetails.obj(), ShardingCatalogClient::kMajorityWriteConcern);
+ Grid::get(opCtx)->catalogClient(opCtx)->logChange(
+ opCtx, "addShard", "", shardDetails.obj(), ShardingCatalogClient::kMajorityWriteConcern);
// Ensure the added shard is visible to this process.
- auto shardRegistry = Grid::get(txn)->shardRegistry();
- if (!shardRegistry->getShard(txn, shardType.getName()).isOK()) {
+ auto shardRegistry = Grid::get(opCtx)->shardRegistry();
+ if (!shardRegistry->getShard(opCtx, shardType.getName()).isOK()) {
return {ErrorCodes::OperationFailed,
"Could not find shard metadata for shard after adding it. This most likely "
"indicates that the shard was removed immediately after it was added."};
@@ -708,13 +711,13 @@ void ShardingCatalogManagerImpl::appendConnectionStats(executor::ConnectionPoolS
}
BSONObj ShardingCatalogManagerImpl::createShardIdentityUpsertForAddShard(
- OperationContext* txn, const std::string& shardName) {
+ OperationContext* opCtx, const std::string& shardName) {
std::unique_ptr<BatchedUpdateDocument> updateDoc(new BatchedUpdateDocument());
BSONObjBuilder query;
query.append("_id", "shardIdentity");
query.append(ShardIdentityType::shardName(), shardName);
- query.append(ShardIdentityType::clusterId(), ClusterIdentityLoader::get(txn)->getClusterId());
+ query.append(ShardIdentityType::clusterId(), ClusterIdentityLoader::get(opCtx)->getClusterId());
updateDoc->setQuery(query.obj());
BSONObjBuilder update;
@@ -722,7 +725,7 @@ BSONObj ShardingCatalogManagerImpl::createShardIdentityUpsertForAddShard(
BSONObjBuilder set(update.subobjStart("$set"));
set.append(
ShardIdentityType::configsvrConnString(),
- repl::ReplicationCoordinator::get(txn)->getConfig().getConnectionString().toString());
+ repl::ReplicationCoordinator::get(opCtx)->getConfig().getConnectionString().toString());
}
updateDoc->setUpdateExpr(update.obj());
updateDoc->setUpsert(true);
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_zone_operations_impl.cpp b/src/mongo/s/catalog/sharding_catalog_manager_zone_operations_impl.cpp
index 6af584d3c30..b3e57ce1a0b 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_zone_operations_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_zone_operations_impl.cpp
@@ -58,7 +58,7 @@ const WriteConcernOptions kNoWaitWriteConcern(1, WriteConcernOptions::SyncMode::
* Note: range should have the full shard key.
* Returns ErrorCodes::RangeOverlapConflict is an overlap is detected.
*/
-Status checkForOveralappedZonedKeyRange(OperationContext* txn,
+Status checkForOveralappedZonedKeyRange(OperationContext* opCtx,
Shard* configServer,
const NamespaceString& ns,
const ChunkRange& range,
@@ -66,7 +66,7 @@ Status checkForOveralappedZonedKeyRange(OperationContext* txn,
const KeyPattern& shardKeyPattern) {
DistributionStatus chunkDist(ns, ShardToChunksMap{});
- auto tagStatus = configServer->exhaustiveFindOnConfig(txn,
+ auto tagStatus = configServer->exhaustiveFindOnConfig(opCtx,
kConfigPrimarySelector,
repl::ReadConcernLevel::kLocalReadConcern,
NamespaceString(TagsType::ConfigNS),
@@ -112,13 +112,13 @@ Status checkForOveralappedZonedKeyRange(OperationContext* txn,
* - ErrorCodes::ShardKeyNotFound if range is not compatible (for example, not a prefix of shard
* key) with the shard key of ns.
*/
-StatusWith<ChunkRange> includeFullShardKey(OperationContext* txn,
+StatusWith<ChunkRange> includeFullShardKey(OperationContext* opCtx,
Shard* configServer,
const NamespaceString& ns,
const ChunkRange& range,
KeyPattern* shardKeyPatternOut) {
auto findCollStatus =
- configServer->exhaustiveFindOnConfig(txn,
+ configServer->exhaustiveFindOnConfig(opCtx,
kConfigPrimarySelector,
repl::ReadConcernLevel::kLocalReadConcern,
NamespaceString(CollectionType::ConfigNS),
@@ -172,13 +172,13 @@ StatusWith<ChunkRange> includeFullShardKey(OperationContext* txn,
} // namespace
-Status ShardingCatalogManagerImpl::addShardToZone(OperationContext* txn,
+Status ShardingCatalogManagerImpl::addShardToZone(OperationContext* opCtx,
const std::string& shardName,
const std::string& zoneName) {
- Lock::ExclusiveLock lk(txn->lockState(), _kZoneOpLock);
+ Lock::ExclusiveLock lk(opCtx->lockState(), _kZoneOpLock);
- auto updateStatus = Grid::get(txn)->catalogClient(txn)->updateConfigDocument(
- txn,
+ auto updateStatus = Grid::get(opCtx)->catalogClient(opCtx)->updateConfigDocument(
+ opCtx,
ShardType::ConfigNS,
BSON(ShardType::name(shardName)),
BSON("$addToSet" << BSON(ShardType::tags() << zoneName)),
@@ -197,12 +197,12 @@ Status ShardingCatalogManagerImpl::addShardToZone(OperationContext* txn,
return Status::OK();
}
-Status ShardingCatalogManagerImpl::removeShardFromZone(OperationContext* txn,
+Status ShardingCatalogManagerImpl::removeShardFromZone(OperationContext* opCtx,
const std::string& shardName,
const std::string& zoneName) {
- Lock::ExclusiveLock lk(txn->lockState(), _kZoneOpLock);
+ Lock::ExclusiveLock lk(opCtx->lockState(), _kZoneOpLock);
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
const NamespaceString shardNS(ShardType::ConfigNS);
//
@@ -210,7 +210,7 @@ Status ShardingCatalogManagerImpl::removeShardFromZone(OperationContext* txn,
//
auto findShardExistsStatus =
- configShard->exhaustiveFindOnConfig(txn,
+ configShard->exhaustiveFindOnConfig(opCtx,
kConfigPrimarySelector,
repl::ReadConcernLevel::kLocalReadConcern,
shardNS,
@@ -232,7 +232,7 @@ Status ShardingCatalogManagerImpl::removeShardFromZone(OperationContext* txn,
//
auto findShardStatus =
- configShard->exhaustiveFindOnConfig(txn,
+ configShard->exhaustiveFindOnConfig(opCtx,
kConfigPrimarySelector,
repl::ReadConcernLevel::kLocalReadConcern,
shardNS,
@@ -265,7 +265,7 @@ Status ShardingCatalogManagerImpl::removeShardFromZone(OperationContext* txn,
}
auto findChunkRangeStatus =
- configShard->exhaustiveFindOnConfig(txn,
+ configShard->exhaustiveFindOnConfig(opCtx,
kConfigPrimarySelector,
repl::ReadConcernLevel::kLocalReadConcern,
NamespaceString(TagsType::ConfigNS),
@@ -287,8 +287,8 @@ Status ShardingCatalogManagerImpl::removeShardFromZone(OperationContext* txn,
// Perform update.
//
- auto updateStatus = Grid::get(txn)->catalogClient(txn)->updateConfigDocument(
- txn,
+ auto updateStatus = Grid::get(opCtx)->catalogClient(opCtx)->updateConfigDocument(
+ opCtx,
ShardType::ConfigNS,
BSON(ShardType::name(shardName)),
BSON("$pull" << BSON(ShardType::tags() << zoneName)),
@@ -309,17 +309,17 @@ Status ShardingCatalogManagerImpl::removeShardFromZone(OperationContext* txn,
}
-Status ShardingCatalogManagerImpl::assignKeyRangeToZone(OperationContext* txn,
+Status ShardingCatalogManagerImpl::assignKeyRangeToZone(OperationContext* opCtx,
const NamespaceString& ns,
const ChunkRange& givenRange,
const std::string& zoneName) {
- Lock::ExclusiveLock lk(txn->lockState(), _kZoneOpLock);
+ Lock::ExclusiveLock lk(opCtx->lockState(), _kZoneOpLock);
- auto configServer = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configServer = Grid::get(opCtx)->shardRegistry()->getConfigShard();
KeyPattern shardKeyPattern{BSONObj()};
auto fullShardKeyStatus =
- includeFullShardKey(txn, configServer.get(), ns, givenRange, &shardKeyPattern);
+ includeFullShardKey(opCtx, configServer.get(), ns, givenRange, &shardKeyPattern);
if (!fullShardKeyStatus.isOK()) {
return fullShardKeyStatus.getStatus();
}
@@ -327,7 +327,7 @@ Status ShardingCatalogManagerImpl::assignKeyRangeToZone(OperationContext* txn,
const auto& fullShardKeyRange = fullShardKeyStatus.getValue();
auto zoneExistStatus =
- configServer->exhaustiveFindOnConfig(txn,
+ configServer->exhaustiveFindOnConfig(opCtx,
kConfigPrimarySelector,
repl::ReadConcernLevel::kLocalReadConcern,
NamespaceString(ShardType::ConfigNS),
@@ -346,7 +346,7 @@ Status ShardingCatalogManagerImpl::assignKeyRangeToZone(OperationContext* txn,
}
auto overlapStatus = checkForOveralappedZonedKeyRange(
- txn, configServer.get(), ns, fullShardKeyRange, zoneName, shardKeyPattern);
+ opCtx, configServer.get(), ns, fullShardKeyRange, zoneName, shardKeyPattern);
if (!overlapStatus.isOK()) {
return overlapStatus;
}
@@ -362,8 +362,8 @@ Status ShardingCatalogManagerImpl::assignKeyRangeToZone(OperationContext* txn,
updateBuilder.append(TagsType::max(), fullShardKeyRange.getMax());
updateBuilder.append(TagsType::tag(), zoneName);
- auto updateStatus = Grid::get(txn)->catalogClient(txn)->updateConfigDocument(
- txn, TagsType::ConfigNS, updateQuery, updateBuilder.obj(), true, kNoWaitWriteConcern);
+ auto updateStatus = Grid::get(opCtx)->catalogClient(opCtx)->updateConfigDocument(
+ opCtx, TagsType::ConfigNS, updateQuery, updateBuilder.obj(), true, kNoWaitWriteConcern);
if (!updateStatus.isOK()) {
return updateStatus.getStatus();
@@ -372,16 +372,16 @@ Status ShardingCatalogManagerImpl::assignKeyRangeToZone(OperationContext* txn,
return Status::OK();
}
-Status ShardingCatalogManagerImpl::removeKeyRangeFromZone(OperationContext* txn,
+Status ShardingCatalogManagerImpl::removeKeyRangeFromZone(OperationContext* opCtx,
const NamespaceString& ns,
const ChunkRange& range) {
- Lock::ExclusiveLock lk(txn->lockState(), _kZoneOpLock);
+ Lock::ExclusiveLock lk(opCtx->lockState(), _kZoneOpLock);
- auto configServer = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configServer = Grid::get(opCtx)->shardRegistry()->getConfigShard();
KeyPattern shardKeyPattern{BSONObj()};
auto fullShardKeyStatus =
- includeFullShardKey(txn, configServer.get(), ns, range, &shardKeyPattern);
+ includeFullShardKey(opCtx, configServer.get(), ns, range, &shardKeyPattern);
if (!fullShardKeyStatus.isOK()) {
return fullShardKeyStatus.getStatus();
}
@@ -390,8 +390,8 @@ Status ShardingCatalogManagerImpl::removeKeyRangeFromZone(OperationContext* txn,
removeBuilder.append("_id", BSON(TagsType::ns(ns.ns()) << TagsType::min(range.getMin())));
removeBuilder.append(TagsType::max(), range.getMax());
- return Grid::get(txn)->catalogClient(txn)->removeConfigDocuments(
- txn, TagsType::ConfigNS, removeBuilder.obj(), kNoWaitWriteConcern);
+ return Grid::get(opCtx)->catalogClient(opCtx)->removeConfigDocuments(
+ opCtx, TagsType::ConfigNS, removeBuilder.obj(), kNoWaitWriteConcern);
}
} // namespace mongo
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index b8cec8396de..d2c8eaf5504 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -45,7 +45,7 @@ CatalogCache::CatalogCache() = default;
CatalogCache::~CatalogCache() = default;
-StatusWith<std::shared_ptr<DBConfig>> CatalogCache::getDatabase(OperationContext* txn,
+StatusWith<std::shared_ptr<DBConfig>> CatalogCache::getDatabase(OperationContext* opCtx,
StringData dbName) {
stdx::lock_guard<stdx::mutex> guard(_mutex);
@@ -55,7 +55,7 @@ StatusWith<std::shared_ptr<DBConfig>> CatalogCache::getDatabase(OperationContext
}
// Need to load from the store
- auto status = Grid::get(txn)->catalogClient(txn)->getDatabase(txn, dbName.toString());
+ auto status = Grid::get(opCtx)->catalogClient(opCtx)->getDatabase(opCtx, dbName.toString());
if (!status.isOK()) {
return status.getStatus();
}
@@ -63,7 +63,7 @@ StatusWith<std::shared_ptr<DBConfig>> CatalogCache::getDatabase(OperationContext
const auto& dbOpTimePair = status.getValue();
auto db = std::make_shared<DBConfig>(dbOpTimePair.value, dbOpTimePair.opTime);
try {
- db->load(txn);
+ db->load(opCtx);
auto emplaceResult = _databases.try_emplace(dbName, std::move(db));
return emplaceResult.first->second;
} catch (const DBException& ex) {
diff --git a/src/mongo/s/catalog_cache.h b/src/mongo/s/catalog_cache.h
index 8d30c1aebf0..0e63f94b52a 100644
--- a/src/mongo/s/catalog_cache.h
+++ b/src/mongo/s/catalog_cache.h
@@ -62,7 +62,7 @@ public:
*
* Returns the database cache entry if the database exists or a failed status otherwise.
*/
- StatusWith<std::shared_ptr<DBConfig>> getDatabase(OperationContext* txn, StringData dbName);
+ StatusWith<std::shared_ptr<DBConfig>> getDatabase(OperationContext* opCtx, StringData dbName);
/**
* Removes the database information for the specified name from the cache, so that the
diff --git a/src/mongo/s/chunk_diff.cpp b/src/mongo/s/chunk_diff.cpp
index 90ec41b8923..f21555043ad 100644
--- a/src/mongo/s/chunk_diff.cpp
+++ b/src/mongo/s/chunk_diff.cpp
@@ -90,7 +90,7 @@ typename ConfigDiffTracker<ValType>::RangeOverlap ConfigDiffTracker<ValType>::_o
}
template <class ValType>
-int ConfigDiffTracker<ValType>::calculateConfigDiff(OperationContext* txn,
+int ConfigDiffTracker<ValType>::calculateConfigDiff(OperationContext* opCtx,
const std::vector<ChunkType>& chunks) {
// Apply the chunk changes to the ranges and versions
//
@@ -129,7 +129,7 @@ int ConfigDiffTracker<ValType>::calculateConfigDiff(OperationContext* txn,
}
// Chunk version changes
- ShardId shard = shardFor(txn, chunk.getShard());
+ ShardId shard = shardFor(opCtx, chunk.getShard());
typename MaxChunkVersionMap::const_iterator shardVersionIt = _maxShardVersions->find(shard);
if (shardVersionIt == _maxShardVersions->end() || shardVersionIt->second < chunkVersion) {
@@ -164,7 +164,7 @@ int ConfigDiffTracker<ValType>::calculateConfigDiff(OperationContext* txn,
return -1;
}
- _currMap->insert(rangeFor(txn, chunk));
+ _currMap->insert(rangeFor(opCtx, chunk));
}
return _validDiffs;
diff --git a/src/mongo/s/chunk_diff.h b/src/mongo/s/chunk_diff.h
index 9ea6ed5b62e..0cea9fa678a 100644
--- a/src/mongo/s/chunk_diff.h
+++ b/src/mongo/s/chunk_diff.h
@@ -103,7 +103,7 @@ public:
// Applies changes to the config data from a vector of chunks passed in. Also includes minor
// version changes for particular major-version chunks if explicitly specified.
// Returns the number of diffs processed, or -1 if the diffs were inconsistent.
- int calculateConfigDiff(OperationContext* txn, const std::vector<ChunkType>& chunks);
+ int calculateConfigDiff(OperationContext* opCtx, const std::vector<ChunkType>& chunks);
protected:
/**
@@ -119,10 +119,10 @@ protected:
return true;
}
- virtual std::pair<BSONObj, ValType> rangeFor(OperationContext* txn,
+ virtual std::pair<BSONObj, ValType> rangeFor(OperationContext* opCtx,
const ChunkType& chunk) const = 0;
- virtual ShardId shardFor(OperationContext* txn, const ShardId& name) const = 0;
+ virtual ShardId shardFor(OperationContext* opCtx, const ShardId& name) const = 0;
private:
// Whether or not a range exists in the min/max region
diff --git a/src/mongo/s/chunk_diff_test.cpp b/src/mongo/s/chunk_diff_test.cpp
index fee3d67743e..888bf8f1809 100644
--- a/src/mongo/s/chunk_diff_test.cpp
+++ b/src/mongo/s/chunk_diff_test.cpp
@@ -64,11 +64,11 @@ public:
return true;
}
- virtual pair<BSONObj, BSONObj> rangeFor(OperationContext* txn, const ChunkType& chunk) const {
+ virtual pair<BSONObj, BSONObj> rangeFor(OperationContext* opCtx, const ChunkType& chunk) const {
return make_pair(chunk.getMin(), chunk.getMax());
}
- virtual ShardId shardFor(OperationContext* txn, const ShardId& name) const {
+ virtual ShardId shardFor(OperationContext* opCtx, const ShardId& name) const {
return name;
}
};
@@ -84,7 +84,7 @@ public:
return false;
}
- virtual pair<BSONObj, BSONObj> rangeFor(OperationContext* txn, const ChunkType& chunk) const {
+ virtual pair<BSONObj, BSONObj> rangeFor(OperationContext* opCtx, const ChunkType& chunk) const {
return make_pair(chunk.getMax(), chunk.getMin());
}
};
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index 75f2cc7c11c..20cfd7e098f 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -87,13 +87,14 @@ public:
return false;
}
- pair<BSONObj, shared_ptr<Chunk>> rangeFor(OperationContext* txn,
+ pair<BSONObj, shared_ptr<Chunk>> rangeFor(OperationContext* opCtx,
const ChunkType& chunk) const final {
return std::make_pair(chunk.getMax(), std::make_shared<Chunk>(chunk));
}
- ShardId shardFor(OperationContext* txn, const ShardId& shardId) const final {
- const auto shard = uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, shardId));
+ ShardId shardFor(OperationContext* opCtx, const ShardId& shardId) const final {
+ const auto shard =
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId));
return shard->getId();
}
@@ -170,7 +171,7 @@ ChunkManager::ChunkManager(NamespaceString nss,
ChunkManager::~ChunkManager() = default;
-void ChunkManager::loadExistingRanges(OperationContext* txn, const ChunkManager* oldManager) {
+void ChunkManager::loadExistingRanges(OperationContext* opCtx, const ChunkManager* oldManager) {
invariant(!_version.isSet());
int tries = 3;
@@ -187,7 +188,7 @@ void ChunkManager::loadExistingRanges(OperationContext* txn, const ChunkManager*
<< " sequenceNumber: " << _sequenceNumber
<< " based on: " << (oldManager ? oldManager->getVersion().toString() : "(empty)");
- if (_load(txn, chunkMap, shardIds, &shardVersions, oldManager)) {
+ if (_load(opCtx, chunkMap, shardIds, &shardVersions, oldManager)) {
// TODO: Merge into diff code above, so we validate in one place
if (isChunkMapValid(chunkMap)) {
_chunkMap = std::move(chunkMap);
@@ -213,7 +214,7 @@ void ChunkManager::loadExistingRanges(OperationContext* txn, const ChunkManager*
<< " after 3 attempts. Please try again.");
}
-bool ChunkManager::_load(OperationContext* txn,
+bool ChunkManager::_load(OperationContext* opCtx,
ChunkMap& chunkMap,
set<ShardId>& shardIds,
ShardVersionMap* shardVersions,
@@ -252,8 +253,8 @@ bool ChunkManager::_load(OperationContext* txn,
// Diff tracker should *always* find at least one chunk if collection exists
repl::OpTime opTime;
std::vector<ChunkType> chunks;
- uassertStatusOK(Grid::get(txn)->catalogClient(txn)->getChunks(
- txn,
+ uassertStatusOK(Grid::get(opCtx)->catalogClient(opCtx)->getChunks(
+ opCtx,
diffQuery.query,
diffQuery.sort,
boost::none,
@@ -264,14 +265,14 @@ bool ChunkManager::_load(OperationContext* txn,
invariant(opTime >= _configOpTime);
_configOpTime = opTime;
- int diffsApplied = differ.calculateConfigDiff(txn, chunks);
+ int diffsApplied = differ.calculateConfigDiff(opCtx, chunks);
if (diffsApplied > 0) {
LOG(2) << "loaded " << diffsApplied << " chunks into new chunk manager for " << _nss
<< " with version " << _version;
// Add all existing shards we find to the shards set
for (ShardVersionMap::iterator it = shardVersions->begin(); it != shardVersions->end();) {
- auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, it->first);
+ auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, it->first);
if (shardStatus.isOK()) {
shardIds.insert(it->first);
++it;
@@ -347,7 +348,7 @@ std::shared_ptr<Chunk> ChunkManager::findIntersectingChunkWithSimpleCollation(
return findIntersectingChunk(shardKey, CollationSpec::kSimpleSpec);
}
-void ChunkManager::getShardIdsForQuery(OperationContext* txn,
+void ChunkManager::getShardIdsForQuery(OperationContext* opCtx,
const BSONObj& query,
const BSONObj& collation,
set<ShardId>* shardIds) const {
@@ -360,8 +361,8 @@ void ChunkManager::getShardIdsForQuery(OperationContext* txn,
qr->setCollation(_defaultCollator->getSpec().toBSON());
}
- std::unique_ptr<CanonicalQuery> cq =
- uassertStatusOK(CanonicalQuery::canonicalize(txn, std::move(qr), ExtensionsCallbackNoop()));
+ std::unique_ptr<CanonicalQuery> cq = uassertStatusOK(
+ CanonicalQuery::canonicalize(opCtx, std::move(qr), ExtensionsCallbackNoop()));
// Query validation
if (QueryPlannerCommon::hasNode(cq->root(), MatchExpression::GEO_NEAR)) {
diff --git a/src/mongo/s/chunk_manager.h b/src/mongo/s/chunk_manager.h
index 08d1d9fe229..365d4d5df62 100644
--- a/src/mongo/s/chunk_manager.h
+++ b/src/mongo/s/chunk_manager.h
@@ -103,7 +103,7 @@ public:
}
// Loads existing ranges based on info in chunk manager
- void loadExistingRanges(OperationContext* txn, const ChunkManager* oldManager);
+ void loadExistingRanges(OperationContext* opCtx, const ChunkManager* oldManager);
//
// Methods to use once loaded / created
@@ -133,7 +133,7 @@ public:
* Finds the shard IDs for a given filter and collation. If collation is empty, we use the
* collection default collation for targeting.
*/
- void getShardIdsForQuery(OperationContext* txn,
+ void getShardIdsForQuery(OperationContext* opCtx,
const BSONObj& query,
const BSONObj& collation,
std::set<ShardId>* shardIds) const;
@@ -218,7 +218,7 @@ private:
* _chunkRangeMap are consistent with each other. If false is returned, it is not safe to use
* the chunk manager anymore.
*/
- bool _load(OperationContext* txn,
+ bool _load(OperationContext* opCtx,
ChunkMap& chunks,
std::set<ShardId>& shardIds,
ShardVersionMap* shardVersions,
diff --git a/src/mongo/s/client/parallel.cpp b/src/mongo/s/client/parallel.cpp
index 1b16dee2032..89d2836dc92 100644
--- a/src/mongo/s/client/parallel.cpp
+++ b/src/mongo/s/client/parallel.cpp
@@ -211,13 +211,13 @@ ParallelSortClusteredCursor::~ParallelSortClusteredCursor() {
_done = true;
}
-void ParallelSortClusteredCursor::init(OperationContext* txn) {
+void ParallelSortClusteredCursor::init(OperationContext* opCtx) {
if (_didInit)
return;
_didInit = true;
if (!_qSpec.isEmpty()) {
- fullInit(txn);
+ fullInit(opCtx);
} else {
// You can only get here by using the legacy constructor
// TODO: Eliminate this
@@ -316,17 +316,17 @@ void ParallelSortClusteredCursor::_finishCons() {
17306, "have to have all text meta sort keys in projection", textMetaSortKeyFields.empty());
}
-void ParallelSortClusteredCursor::fullInit(OperationContext* txn) {
- startInit(txn);
- finishInit(txn);
+void ParallelSortClusteredCursor::fullInit(OperationContext* opCtx) {
+ startInit(opCtx);
+ finishInit(opCtx);
}
-void ParallelSortClusteredCursor::_markStaleNS(OperationContext* txn,
+void ParallelSortClusteredCursor::_markStaleNS(OperationContext* opCtx,
const NamespaceString& staleNS,
const StaleConfigException& e,
bool& forceReload) {
if (e.requiresFullReload()) {
- Grid::get(txn)->catalogCache()->invalidate(staleNS.db());
+ Grid::get(opCtx)->catalogCache()->invalidate(staleNS.db());
}
if (_staleNSMap.find(staleNS.ns()) == _staleNSMap.end())
@@ -344,10 +344,10 @@ void ParallelSortClusteredCursor::_markStaleNS(OperationContext* txn,
forceReload = tries > 2;
}
-void ParallelSortClusteredCursor::_handleStaleNS(OperationContext* txn,
+void ParallelSortClusteredCursor::_handleStaleNS(OperationContext* opCtx,
const NamespaceString& staleNS,
bool forceReload) {
- auto scopedCMStatus = ScopedChunkManager::get(txn, staleNS);
+ auto scopedCMStatus = ScopedChunkManager::get(opCtx, staleNS);
if (!scopedCMStatus.isOK()) {
log() << "cannot reload database info for stale namespace " << staleNS.ns();
return;
@@ -356,11 +356,11 @@ void ParallelSortClusteredCursor::_handleStaleNS(OperationContext* txn,
const auto& scopedCM = scopedCMStatus.getValue();
// Reload chunk manager, potentially forcing the namespace
- scopedCM.db()->getChunkManagerIfExists(txn, staleNS.ns(), true, forceReload);
+ scopedCM.db()->getChunkManagerIfExists(opCtx, staleNS.ns(), true, forceReload);
}
void ParallelSortClusteredCursor::setupVersionAndHandleSlaveOk(
- OperationContext* txn,
+ OperationContext* opCtx,
std::shared_ptr<ParallelConnectionState> state,
const ShardId& shardId,
std::shared_ptr<Shard> primary,
@@ -377,7 +377,8 @@ void ParallelSortClusteredCursor::setupVersionAndHandleSlaveOk(
// Setup conn
if (!state->conn) {
- const auto shard = uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, shardId));
+ const auto shard =
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId));
state->conn.reset(new ShardConnection(shard->getConnString(), ns.ns(), manager));
}
@@ -440,7 +441,7 @@ void ParallelSortClusteredCursor::setupVersionAndHandleSlaveOk(
}
}
-void ParallelSortClusteredCursor::startInit(OperationContext* txn) {
+void ParallelSortClusteredCursor::startInit(OperationContext* opCtx) {
const bool returnPartial = (_qSpec.options() & QueryOption_PartialResults);
const NamespaceString nss(!_cInfo.isEmpty() ? _cInfo.versionedNS : _qSpec.ns());
@@ -458,7 +459,7 @@ void ParallelSortClusteredCursor::startInit(OperationContext* txn) {
shared_ptr<Shard> primary;
{
- auto scopedCMStatus = ScopedChunkManager::get(txn, nss);
+ auto scopedCMStatus = ScopedChunkManager::get(opCtx, nss);
if (scopedCMStatus != ErrorCodes::NamespaceNotFound) {
uassertStatusOK(scopedCMStatus.getStatus());
const auto& scopedCM = scopedCMStatus.getValue();
@@ -476,7 +477,7 @@ void ParallelSortClusteredCursor::startInit(OperationContext* txn) {
<< manager->getVersion().toString() << "]";
}
- manager->getShardIdsForQuery(txn,
+ manager->getShardIdsForQuery(opCtx,
!_cInfo.isEmpty() ? _cInfo.cmdFilter : _qSpec.filter(),
!_cInfo.isEmpty() ? _cInfo.cmdCollation : BSONObj(),
&shardIds);
@@ -551,7 +552,7 @@ void ParallelSortClusteredCursor::startInit(OperationContext* txn) {
mdata.pcState = std::make_shared<ParallelConnectionState>();
auto state = mdata.pcState;
- setupVersionAndHandleSlaveOk(txn, state, shardId, primary, nss, vinfo, manager);
+ setupVersionAndHandleSlaveOk(opCtx, state, shardId, primary, nss, vinfo, manager);
const string& ns = _qSpec.ns();
@@ -643,7 +644,7 @@ void ParallelSortClusteredCursor::startInit(OperationContext* txn) {
// Probably need to retry fully
bool forceReload;
- _markStaleNS(txn, staleNS, e, forceReload);
+ _markStaleNS(opCtx, staleNS, e, forceReload);
LOG(1) << "stale config of ns " << staleNS
<< " during initialization, will retry with forced : " << forceReload
@@ -654,10 +655,10 @@ void ParallelSortClusteredCursor::startInit(OperationContext* txn) {
warning() << "versioned ns " << nss.ns() << " doesn't match stale config namespace "
<< staleNS;
- _handleStaleNS(txn, staleNS, forceReload);
+ _handleStaleNS(opCtx, staleNS, forceReload);
// Restart with new chunk manager
- startInit(txn);
+ startInit(opCtx);
return;
} catch (SocketException& e) {
warning() << "socket exception when initializing on " << shardId
@@ -727,7 +728,7 @@ void ParallelSortClusteredCursor::startInit(OperationContext* txn) {
}
}
-void ParallelSortClusteredCursor::finishInit(OperationContext* txn) {
+void ParallelSortClusteredCursor::finishInit(OperationContext* opCtx) {
bool returnPartial = (_qSpec.options() & QueryOption_PartialResults);
bool specialVersion = _cInfo.versionedNS.size() > 0;
string ns = specialVersion ? _cInfo.versionedNS : _qSpec.ns();
@@ -867,7 +868,7 @@ void ParallelSortClusteredCursor::finishInit(OperationContext* txn) {
const StaleConfigException& exception = i->second;
bool forceReload;
- _markStaleNS(txn, staleNS, exception, forceReload);
+ _markStaleNS(opCtx, staleNS, exception, forceReload);
LOG(1) << "stale config of ns " << staleNS
<< " on finishing query, will retry with forced : " << forceReload
@@ -878,13 +879,13 @@ void ParallelSortClusteredCursor::finishInit(OperationContext* txn) {
warning() << "versioned ns " << ns << " doesn't match stale config namespace "
<< staleNS;
- _handleStaleNS(txn, staleNS, forceReload);
+ _handleStaleNS(opCtx, staleNS, forceReload);
}
}
// Re-establish connections we need to
- startInit(txn);
- finishInit(txn);
+ startInit(opCtx);
+ finishInit(opCtx);
return;
}
@@ -924,7 +925,8 @@ void ParallelSortClusteredCursor::finishInit(OperationContext* txn) {
_cursors[index].reset(mdata.pcState->cursor.get(), &mdata);
- const auto shard = uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, shardId));
+ const auto shard =
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId));
_servers.insert(shard->getConnString().toString());
index++;
diff --git a/src/mongo/s/client/parallel.h b/src/mongo/s/client/parallel.h
index aeb709f0ffd..d375858bae0 100644
--- a/src/mongo/s/client/parallel.h
+++ b/src/mongo/s/client/parallel.h
@@ -91,7 +91,7 @@ public:
~ParallelSortClusteredCursor();
- void init(OperationContext* txn);
+ void init(OperationContext* opCtx);
bool more();
@@ -107,9 +107,9 @@ public:
private:
using ShardCursorsMap = std::map<ShardId, ParallelConnectionMetadata>;
- void fullInit(OperationContext* txn);
- void startInit(OperationContext* txn);
- void finishInit(OperationContext* txn);
+ void fullInit(OperationContext* opCtx);
+ void startInit(OperationContext* opCtx);
+ void finishInit(OperationContext* opCtx);
bool isCommand() {
return NamespaceString(_qSpec.ns()).isCommand();
@@ -117,11 +117,11 @@ private:
void _finishCons();
- void _markStaleNS(OperationContext* txn,
+ void _markStaleNS(OperationContext* opCtx,
const NamespaceString& staleNS,
const StaleConfigException& e,
bool& forceReload);
- void _handleStaleNS(OperationContext* txn, const NamespaceString& staleNS, bool forceReload);
+ void _handleStaleNS(OperationContext* opCtx, const NamespaceString& staleNS, bool forceReload);
bool _didInit;
bool _done;
@@ -150,7 +150,7 @@ private:
* set connection and the primary cannot be reached, the version
* will not be set if the slaveOk flag is set.
*/
- void setupVersionAndHandleSlaveOk(OperationContext* txn,
+ void setupVersionAndHandleSlaveOk(OperationContext* opCtx,
std::shared_ptr<ParallelConnectionState> state /* in & out */,
const ShardId& shardId,
std::shared_ptr<Shard> primary /* in */,
diff --git a/src/mongo/s/client/shard.cpp b/src/mongo/s/client/shard.cpp
index be61c500604..0702cdc91a5 100644
--- a/src/mongo/s/client/shard.cpp
+++ b/src/mongo/s/client/shard.cpp
@@ -110,27 +110,27 @@ bool Shard::isConfig() const {
return _id == "config";
}
-StatusWith<Shard::CommandResponse> Shard::runCommand(OperationContext* txn,
+StatusWith<Shard::CommandResponse> Shard::runCommand(OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const std::string& dbName,
const BSONObj& cmdObj,
RetryPolicy retryPolicy) {
- return runCommand(txn, readPref, dbName, cmdObj, Milliseconds::max(), retryPolicy);
+ return runCommand(opCtx, readPref, dbName, cmdObj, Milliseconds::max(), retryPolicy);
}
-StatusWith<Shard::CommandResponse> Shard::runCommand(OperationContext* txn,
+StatusWith<Shard::CommandResponse> Shard::runCommand(OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const std::string& dbName,
const BSONObj& cmdObj,
Milliseconds maxTimeMSOverride,
RetryPolicy retryPolicy) {
while (true) {
- auto interruptStatus = txn->checkForInterruptNoAssert();
+ auto interruptStatus = opCtx->checkForInterruptNoAssert();
if (!interruptStatus.isOK()) {
return interruptStatus;
}
- auto hostWithResponse = _runCommand(txn, readPref, dbName, maxTimeMSOverride, cmdObj);
+ auto hostWithResponse = _runCommand(opCtx, readPref, dbName, maxTimeMSOverride, cmdObj);
auto swCmdResponse = std::move(hostWithResponse.commandResponse);
auto commandStatus = _getEffectiveCommandStatus(swCmdResponse);
@@ -147,29 +147,29 @@ StatusWith<Shard::CommandResponse> Shard::runCommand(OperationContext* txn,
}
StatusWith<Shard::CommandResponse> Shard::runCommandWithFixedRetryAttempts(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const std::string& dbName,
const BSONObj& cmdObj,
RetryPolicy retryPolicy) {
return runCommandWithFixedRetryAttempts(
- txn, readPref, dbName, cmdObj, Milliseconds::max(), retryPolicy);
+ opCtx, readPref, dbName, cmdObj, Milliseconds::max(), retryPolicy);
}
StatusWith<Shard::CommandResponse> Shard::runCommandWithFixedRetryAttempts(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const std::string& dbName,
const BSONObj& cmdObj,
Milliseconds maxTimeMSOverride,
RetryPolicy retryPolicy) {
for (int retry = 1; retry <= kOnErrorNumRetries; ++retry) {
- auto interruptStatus = txn->checkForInterruptNoAssert();
+ auto interruptStatus = opCtx->checkForInterruptNoAssert();
if (!interruptStatus.isOK()) {
return interruptStatus;
}
- auto hostWithResponse = _runCommand(txn, readPref, dbName, maxTimeMSOverride, cmdObj);
+ auto hostWithResponse = _runCommand(opCtx, readPref, dbName, maxTimeMSOverride, cmdObj);
auto swCmdResponse = std::move(hostWithResponse.commandResponse);
auto commandStatus = _getEffectiveCommandStatus(swCmdResponse);
@@ -186,7 +186,7 @@ StatusWith<Shard::CommandResponse> Shard::runCommandWithFixedRetryAttempts(
}
BatchedCommandResponse Shard::runBatchWriteCommandOnConfig(
- OperationContext* txn, const BatchedCommandRequest& batchRequest, RetryPolicy retryPolicy) {
+ OperationContext* opCtx, const BatchedCommandRequest& batchRequest, RetryPolicy retryPolicy) {
invariant(isConfig());
const std::string dbname = batchRequest.getNS().db().toString();
@@ -195,7 +195,7 @@ BatchedCommandResponse Shard::runBatchWriteCommandOnConfig(
const BSONObj cmdObj = batchRequest.toBSON();
for (int retry = 1; retry <= kOnErrorNumRetries; ++retry) {
- auto response = _runCommand(txn,
+ auto response = _runCommand(opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
dbname,
kDefaultConfigCommandTimeout,
@@ -221,7 +221,7 @@ BatchedCommandResponse Shard::runBatchWriteCommandOnConfig(
}
StatusWith<Shard::QueryResponse> Shard::exhaustiveFindOnConfig(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const repl::ReadConcernLevel& readConcernLevel,
const NamespaceString& nss,
@@ -233,7 +233,7 @@ StatusWith<Shard::QueryResponse> Shard::exhaustiveFindOnConfig(
for (int retry = 1; retry <= kOnErrorNumRetries; retry++) {
auto result =
- _exhaustiveFindOnConfig(txn, readPref, readConcernLevel, nss, query, sort, limit);
+ _exhaustiveFindOnConfig(opCtx, readPref, readConcernLevel, nss, query, sort, limit);
if (retry < kOnErrorNumRetries &&
isRetriableError(result.getStatus().code(), RetryPolicy::kIdempotent)) {
diff --git a/src/mongo/s/client/shard.h b/src/mongo/s/client/shard.h
index 26413b61c6d..a322af1bcf1 100644
--- a/src/mongo/s/client/shard.h
+++ b/src/mongo/s/client/shard.h
@@ -142,7 +142,7 @@ public:
* given "retryPolicy". Retries indefinitely until/unless a non-retriable error is encountered,
* the maxTimeMs on the OperationContext expires, or the operation is interrupted.
*/
- StatusWith<CommandResponse> runCommand(OperationContext* txn,
+ StatusWith<CommandResponse> runCommand(OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const std::string& dbName,
const BSONObj& cmdObj,
@@ -153,7 +153,7 @@ public:
* Runs for the lesser of the remaining time on the operation context or the specified maxTimeMS
* override.
*/
- StatusWith<CommandResponse> runCommand(OperationContext* txn,
+ StatusWith<CommandResponse> runCommand(OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const std::string& dbName,
const BSONObj& cmdObj,
@@ -166,7 +166,7 @@ public:
* Wherever possible this method should be avoided in favor of runCommand.
*/
StatusWith<CommandResponse> runCommandWithFixedRetryAttempts(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const std::string& dbName,
const BSONObj& cmdObj,
@@ -178,7 +178,7 @@ public:
* Wherever possible this method should be avoided in favor of runCommand.
*/
StatusWith<CommandResponse> runCommandWithFixedRetryAttempts(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const std::string& dbName,
const BSONObj& cmdObj,
@@ -189,7 +189,7 @@ public:
* Expects a single-entry batch write command and runs it on the config server's primary using
* the specified retry policy.
*/
- BatchedCommandResponse runBatchWriteCommandOnConfig(OperationContext* txn,
+ BatchedCommandResponse runBatchWriteCommandOnConfig(OperationContext* opCtx,
const BatchedCommandRequest& batchRequest,
RetryPolicy retryPolicy);
@@ -201,7 +201,7 @@ public:
* ShardRemote instances expect "readConcernLevel" to always be kMajorityReadConcern, whereas
* ShardLocal instances expect either kLocalReadConcern or kMajorityReadConcern.
*/
- StatusWith<QueryResponse> exhaustiveFindOnConfig(OperationContext* txn,
+ StatusWith<QueryResponse> exhaustiveFindOnConfig(OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const repl::ReadConcernLevel& readConcernLevel,
const NamespaceString& nss,
@@ -214,7 +214,7 @@ public:
* so long as the options are the same.
* NOTE: Currently only supported for LocalShard.
*/
- virtual Status createIndexOnConfig(OperationContext* txn,
+ virtual Status createIndexOnConfig(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& keys,
bool unique) = 0;
@@ -252,14 +252,14 @@ private:
*
* NOTE: LocalShard implementation will not return a valid host and so should be ignored.
*/
- virtual HostWithResponse _runCommand(OperationContext* txn,
+ virtual HostWithResponse _runCommand(OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const std::string& dbname,
Milliseconds maxTimeMSOverride,
const BSONObj& cmdObj) = 0;
virtual StatusWith<QueryResponse> _exhaustiveFindOnConfig(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const repl::ReadConcernLevel& readConcernLevel,
const NamespaceString& nss,
diff --git a/src/mongo/s/client/shard_connection.cpp b/src/mongo/s/client/shard_connection.cpp
index ec80fb5c5e5..0a5d178ba3e 100644
--- a/src/mongo/s/client/shard_connection.cpp
+++ b/src/mongo/s/client/shard_connection.cpp
@@ -111,7 +111,7 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
mongo::BSONObj& cmdObj,
int options,
@@ -273,7 +273,7 @@ public:
s->avail = conn;
}
- void checkVersions(OperationContext* txn, const string& ns) {
+ void checkVersions(OperationContext* opCtx, const string& ns) {
vector<ShardId> all;
grid.shardRegistry()->getAllShardIds(&all);
@@ -283,7 +283,7 @@ public:
// Now only check top-level shard connections
for (const ShardId& shardId : all) {
try {
- auto shardStatus = grid.shardRegistry()->getShard(txn, shardId);
+ auto shardStatus = grid.shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
invariant(shardStatus == ErrorCodes::ShardNotFound);
continue;
@@ -298,7 +298,7 @@ public:
s->created++; // After, so failed creation doesn't get counted
}
- versionManager.checkShardVersionCB(txn, s->avail, ns, false, 1);
+ versionManager.checkShardVersionCB(opCtx, s->avail, ns, false, 1);
} catch (const DBException& ex) {
warning() << "problem while initially checking shard versions on"
<< " " << shardId << causedBy(ex);
@@ -450,9 +450,9 @@ void ShardConnection::_finishInit() {
if (versionManager.isVersionableCB(_conn)) {
auto& client = cc();
- auto txn = client.getOperationContext();
- invariant(txn);
- _setVersion = versionManager.checkShardVersionCB(txn, this, false, 1);
+ auto opCtx = client.getOperationContext();
+ invariant(opCtx);
+ _setVersion = versionManager.checkShardVersionCB(opCtx, this, false, 1);
} else {
// Make sure we didn't specify a manager for a non-versionable connection (i.e. config)
verify(!_manager);
@@ -486,8 +486,8 @@ void ShardConnection::kill() {
}
}
-void ShardConnection::checkMyConnectionVersions(OperationContext* txn, const string& ns) {
- ClientConnections::threadInstance()->checkVersions(txn, ns);
+void ShardConnection::checkMyConnectionVersions(OperationContext* opCtx, const string& ns) {
+ ClientConnections::threadInstance()->checkVersions(opCtx, ns);
}
void ShardConnection::releaseMyConnections() {
diff --git a/src/mongo/s/client/shard_connection.h b/src/mongo/s/client/shard_connection.h
index db535aee5a7..62afb593f74 100644
--- a/src/mongo/s/client/shard_connection.h
+++ b/src/mongo/s/client/shard_connection.h
@@ -113,7 +113,7 @@ public:
}
/** checks all of my thread local connections for the version of this ns */
- static void checkMyConnectionVersions(OperationContext* txn, const std::string& ns);
+ static void checkMyConnectionVersions(OperationContext* opCtx, const std::string& ns);
/**
* Returns all the current sharded connections to the pool.
diff --git a/src/mongo/s/client/shard_local.cpp b/src/mongo/s/client/shard_local.cpp
index 54a0af07ba3..5270842a8ba 100644
--- a/src/mongo/s/client/shard_local.cpp
+++ b/src/mongo/s/client/shard_local.cpp
@@ -92,10 +92,10 @@ bool ShardLocal::isRetriableError(ErrorCodes::Error code, RetryPolicy options) {
}
}
-void ShardLocal::_updateLastOpTimeFromClient(OperationContext* txn,
+void ShardLocal::_updateLastOpTimeFromClient(OperationContext* opCtx,
const repl::OpTime& previousOpTimeOnClient) {
repl::OpTime lastOpTimeFromClient =
- repl::ReplClientInfo::forClient(txn->getClient()).getLastOp();
+ repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
invariant(lastOpTimeFromClient >= previousOpTimeOnClient);
if (lastOpTimeFromClient.isNull() || lastOpTimeFromClient == previousOpTimeOnClient) {
return;
@@ -115,19 +115,19 @@ repl::OpTime ShardLocal::_getLastOpTime() {
return _lastOpTime;
}
-Shard::HostWithResponse ShardLocal::_runCommand(OperationContext* txn,
+Shard::HostWithResponse ShardLocal::_runCommand(OperationContext* opCtx,
const ReadPreferenceSetting& unused,
const std::string& dbName,
Milliseconds maxTimeMSOverrideUnused,
const BSONObj& cmdObj) {
repl::OpTime currentOpTimeFromClient =
- repl::ReplClientInfo::forClient(txn->getClient()).getLastOp();
- ON_BLOCK_EXIT([this, &txn, &currentOpTimeFromClient] {
- _updateLastOpTimeFromClient(txn, currentOpTimeFromClient);
+ repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
+ ON_BLOCK_EXIT([this, &opCtx, &currentOpTimeFromClient] {
+ _updateLastOpTimeFromClient(opCtx, currentOpTimeFromClient);
});
try {
- DBDirectClient client(txn);
+ DBDirectClient client(opCtx);
rpc::UniqueReply commandResponse = client.runCommandWithMetadata(
dbName, cmdObj.firstElementFieldName(), rpc::makeEmptyMetadata(), cmdObj);
BSONObj responseReply = commandResponse->getCommandReply().getOwned();
@@ -147,29 +147,29 @@ Shard::HostWithResponse ShardLocal::_runCommand(OperationContext* txn,
}
StatusWith<Shard::QueryResponse> ShardLocal::_exhaustiveFindOnConfig(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const repl::ReadConcernLevel& readConcernLevel,
const NamespaceString& nss,
const BSONObj& query,
const BSONObj& sort,
boost::optional<long long> limit) {
- auto replCoord = repl::ReplicationCoordinator::get(txn);
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
if (readConcernLevel == repl::ReadConcernLevel::kMajorityReadConcern) {
// Set up operation context with majority read snapshot so correct optime can be retrieved.
- Status status = txn->recoveryUnit()->setReadFromMajorityCommittedSnapshot();
+ Status status = opCtx->recoveryUnit()->setReadFromMajorityCommittedSnapshot();
// Wait for any writes performed by this ShardLocal instance to be committed and visible.
Status readConcernStatus = replCoord->waitUntilOpTimeForRead(
- txn, repl::ReadConcernArgs{_getLastOpTime(), readConcernLevel});
+ opCtx, repl::ReadConcernArgs{_getLastOpTime(), readConcernLevel});
if (!readConcernStatus.isOK()) {
return readConcernStatus;
}
// Inform the storage engine to read from the committed snapshot for the rest of this
// operation.
- status = txn->recoveryUnit()->setReadFromMajorityCommittedSnapshot();
+ status = opCtx->recoveryUnit()->setReadFromMajorityCommittedSnapshot();
if (!status.isOK()) {
return status;
}
@@ -177,7 +177,7 @@ StatusWith<Shard::QueryResponse> ShardLocal::_exhaustiveFindOnConfig(
invariant(readConcernLevel == repl::ReadConcernLevel::kLocalReadConcern);
}
- DBDirectClient client(txn);
+ DBDirectClient client(opCtx);
Query fullQuery(query);
if (!sort.isEmpty()) {
fullQuery.sort(sort);
@@ -207,14 +207,14 @@ StatusWith<Shard::QueryResponse> ShardLocal::_exhaustiveFindOnConfig(
}
}
-Status ShardLocal::createIndexOnConfig(OperationContext* txn,
+Status ShardLocal::createIndexOnConfig(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& keys,
bool unique) {
invariant(ns.db() == "config" || ns.db() == "admin");
try {
- DBDirectClient client(txn);
+ DBDirectClient client(opCtx);
IndexSpec index;
index.addKeys(keys);
index.unique(unique);
diff --git a/src/mongo/s/client/shard_local.h b/src/mongo/s/client/shard_local.h
index 75e97ed0d29..3afdee97fb8 100644
--- a/src/mongo/s/client/shard_local.h
+++ b/src/mongo/s/client/shard_local.h
@@ -58,20 +58,20 @@ public:
bool isRetriableError(ErrorCodes::Error code, RetryPolicy options) final;
- Status createIndexOnConfig(OperationContext* txn,
+ Status createIndexOnConfig(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& keys,
bool unique) override;
private:
- Shard::HostWithResponse _runCommand(OperationContext* txn,
+ Shard::HostWithResponse _runCommand(OperationContext* opCtx,
const ReadPreferenceSetting& unused,
const std::string& dbName,
Milliseconds maxTimeMSOverrideUnused,
const BSONObj& cmdObj) final;
StatusWith<Shard::QueryResponse> _exhaustiveFindOnConfig(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const repl::ReadConcernLevel& readConcernLevel,
const NamespaceString& nss,
@@ -90,7 +90,7 @@ private:
* 'previousOpTimeOnClient' then the command we just ran didn't do a write, and we should leave
* _lastOpTime alone.
*/
- void _updateLastOpTimeFromClient(OperationContext* txn,
+ void _updateLastOpTimeFromClient(OperationContext* opCtx,
const repl::OpTime& previousOpTimeOnClient);
repl::OpTime _getLastOpTime();
diff --git a/src/mongo/s/client/shard_local_test.cpp b/src/mongo/s/client/shard_local_test.cpp
index c667be3ccd0..80d1c12ff81 100644
--- a/src/mongo/s/client/shard_local_test.cpp
+++ b/src/mongo/s/client/shard_local_test.cpp
@@ -47,7 +47,7 @@ namespace {
class ShardLocalTest : public ServiceContextMongoDTest {
protected:
- ServiceContext::UniqueOperationContext _txn;
+ ServiceContext::UniqueOperationContext _opCtx;
std::unique_ptr<ShardLocal> _shardLocal;
/**
@@ -81,17 +81,17 @@ private:
void ShardLocalTest::setUp() {
ServiceContextMongoDTest::setUp();
Client::initThreadIfNotAlready();
- _txn = getGlobalServiceContext()->makeOperationContext(&cc());
+ _opCtx = getGlobalServiceContext()->makeOperationContext(&cc());
serverGlobalParams.clusterRole = ClusterRole::ConfigServer;
_shardLocal = stdx::make_unique<ShardLocal>(ShardId("config"));
const repl::ReplSettings replSettings = {};
repl::setGlobalReplicationCoordinator(
- new repl::ReplicationCoordinatorMock(_txn->getServiceContext(), replSettings));
+ new repl::ReplicationCoordinatorMock(_opCtx->getServiceContext(), replSettings));
repl::getGlobalReplicationCoordinator()->setFollowerMode(repl::MemberState::RS_PRIMARY);
}
void ShardLocalTest::tearDown() {
- _txn.reset();
+ _opCtx.reset();
ServiceContextMongoDTest::tearDown();
repl::setGlobalReplicationCoordinator(nullptr);
}
@@ -106,7 +106,7 @@ StatusWith<Shard::CommandResponse> ShardLocalTest::runFindAndModifyRunCommand(Na
WriteConcernOptions::kMajority, WriteConcernOptions::SyncMode::UNSET, Seconds(15)));
return _shardLocal->runCommandWithFixedRetryAttempts(
- _txn.get(),
+ _opCtx.get(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
nss.db().toString(),
findAndModifyRequest.toBSON(),
@@ -115,7 +115,7 @@ StatusWith<Shard::CommandResponse> ShardLocalTest::runFindAndModifyRunCommand(Na
StatusWith<std::vector<BSONObj>> ShardLocalTest::getIndexes(NamespaceString nss) {
auto response = _shardLocal->runCommandWithFixedRetryAttempts(
- _txn.get(),
+ _opCtx.get(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
nss.db().toString(),
BSON("listIndexes" << nss.coll().toString()),
@@ -148,7 +148,7 @@ StatusWith<Shard::QueryResponse> ShardLocalTest::runFindQuery(NamespaceString ns
BSONObj query,
BSONObj sort,
boost::optional<long long> limit) {
- return _shardLocal->exhaustiveFindOnConfig(_txn.get(),
+ return _shardLocal->exhaustiveFindOnConfig(_opCtx.get(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kMajorityReadConcern,
nss,
@@ -247,7 +247,7 @@ TEST_F(ShardLocalTest, CreateIndex) {
ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, getIndexes(nss).getStatus());
Status status =
- _shardLocal->createIndexOnConfig(_txn.get(), nss, BSON("a" << 1 << "b" << 1), true);
+ _shardLocal->createIndexOnConfig(_opCtx.get(), nss, BSON("a" << 1 << "b" << 1), true);
// Creating the index should implicitly create the collection
ASSERT_OK(status);
@@ -256,13 +256,13 @@ TEST_F(ShardLocalTest, CreateIndex) {
ASSERT_EQ(2U, indexes.size());
// Making an identical index should be a no-op.
- status = _shardLocal->createIndexOnConfig(_txn.get(), nss, BSON("a" << 1 << "b" << 1), true);
+ status = _shardLocal->createIndexOnConfig(_opCtx.get(), nss, BSON("a" << 1 << "b" << 1), true);
ASSERT_OK(status);
indexes = unittest::assertGet(getIndexes(nss));
ASSERT_EQ(2U, indexes.size());
// Trying to make the same index as non-unique should fail.
- status = _shardLocal->createIndexOnConfig(_txn.get(), nss, BSON("a" << 1 << "b" << 1), false);
+ status = _shardLocal->createIndexOnConfig(_opCtx.get(), nss, BSON("a" << 1 << "b" << 1), false);
ASSERT_EQUALS(ErrorCodes::IndexOptionsConflict, status);
indexes = unittest::assertGet(getIndexes(nss));
ASSERT_EQ(2U, indexes.size());
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index 5f30b22c8ea..7cadf2ed8d0 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -103,7 +103,7 @@ ConnectionString ShardRegistry::getConfigServerConnectionString() const {
return getConfigShard()->getConnString();
}
-StatusWith<shared_ptr<Shard>> ShardRegistry::getShard(OperationContext* txn,
+StatusWith<shared_ptr<Shard>> ShardRegistry::getShard(OperationContext* opCtx,
const ShardId& shardId) {
// If we know about the shard, return it.
auto shard = _data.findByShardId(shardId);
@@ -112,7 +112,7 @@ StatusWith<shared_ptr<Shard>> ShardRegistry::getShard(OperationContext* txn,
}
// If we can't find the shard, attempt to reload the ShardRegistry.
- bool didReload = reload(txn);
+ bool didReload = reload(opCtx);
shard = _data.findByShardId(shardId);
// If we found the shard, return it.
@@ -128,7 +128,7 @@ StatusWith<shared_ptr<Shard>> ShardRegistry::getShard(OperationContext* txn,
// If we did not perform the reload ourselves (because there was a concurrent reload), force a
// reload again to ensure that we have seen data at least as up to date as our first reload.
- reload(txn);
+ reload(opCtx);
shard = _data.findByShardId(shardId);
if (shard) {
@@ -226,10 +226,10 @@ void ShardRegistry::_internalReload(const CallbackArgs& cbArgs) {
}
Client::initThreadIfNotAlready("shard registry reload");
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
try {
- reload(txn.get());
+ reload(opCtx.get());
} catch (const DBException& e) {
log() << "Periodic reload of shard registry failed " << causedBy(e) << "; will retry after "
<< kRefreshPeriod;
@@ -257,7 +257,7 @@ bool ShardRegistry::isUp() const {
return _isUp;
}
-bool ShardRegistry::reload(OperationContext* txn) {
+bool ShardRegistry::reload(OperationContext* opCtx) {
stdx::unique_lock<stdx::mutex> reloadLock(_reloadMutex);
if (_reloadState == ReloadState::Reloading) {
@@ -290,7 +290,7 @@ bool ShardRegistry::reload(OperationContext* txn) {
});
- ShardRegistryData currData(txn, _shardFactory.get());
+ ShardRegistryData currData(opCtx, _shardFactory.get());
currData.addConfigShard(_data.getConfigShard());
_data.swap(currData);
@@ -325,10 +325,10 @@ void ShardRegistry::replicaSetChangeConfigServerUpdateHook(const std::string& se
const std::string& newConnectionString) {
// This is run in it's own thread. Exceptions escaping would result in a call to terminate.
Client::initThread("replSetChange");
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
try {
- std::shared_ptr<Shard> s = Grid::get(txn.get())->shardRegistry()->lookupRSName(setName);
+ std::shared_ptr<Shard> s = Grid::get(opCtx.get())->shardRegistry()->lookupRSName(setName);
if (!s) {
LOG(1) << "shard not found for set: " << newConnectionString
<< " when attempting to inform config servers of updated set membership";
@@ -340,13 +340,15 @@ void ShardRegistry::replicaSetChangeConfigServerUpdateHook(const std::string& se
return;
}
- auto status = Grid::get(txn.get())->catalogClient(txn.get())->updateConfigDocument(
- txn.get(),
- ShardType::ConfigNS,
- BSON(ShardType::name(s->getId().toString())),
- BSON("$set" << BSON(ShardType::host(newConnectionString))),
- false,
- ShardingCatalogClient::kMajorityWriteConcern);
+ auto status =
+ Grid::get(opCtx.get())
+ ->catalogClient(opCtx.get())
+ ->updateConfigDocument(opCtx.get(),
+ ShardType::ConfigNS,
+ BSON(ShardType::name(s->getId().toString())),
+ BSON("$set" << BSON(ShardType::host(newConnectionString))),
+ false,
+ ShardingCatalogClient::kMajorityWriteConcern);
if (!status.isOK()) {
error() << "RSChangeWatcher: could not update config db for set: " << setName
<< " to: " << newConnectionString << causedBy(status.getStatus());
@@ -360,13 +362,13 @@ void ShardRegistry::replicaSetChangeConfigServerUpdateHook(const std::string& se
////////////// ShardRegistryData //////////////////
-ShardRegistryData::ShardRegistryData(OperationContext* txn, ShardFactory* shardFactory) {
- _init(txn, shardFactory);
+ShardRegistryData::ShardRegistryData(OperationContext* opCtx, ShardFactory* shardFactory) {
+ _init(opCtx, shardFactory);
}
-void ShardRegistryData::_init(OperationContext* txn, ShardFactory* shardFactory) {
- auto shardsStatus =
- grid.catalogClient(txn)->getAllShards(txn, repl::ReadConcernLevel::kMajorityReadConcern);
+void ShardRegistryData::_init(OperationContext* opCtx, ShardFactory* shardFactory) {
+ auto shardsStatus = grid.catalogClient(opCtx)->getAllShards(
+ opCtx, repl::ReadConcernLevel::kMajorityReadConcern);
if (!shardsStatus.isOK()) {
uasserted(shardsStatus.getStatus().code(),
diff --git a/src/mongo/s/client/shard_registry.h b/src/mongo/s/client/shard_registry.h
index 374329aad7d..92e31156e88 100644
--- a/src/mongo/s/client/shard_registry.h
+++ b/src/mongo/s/client/shard_registry.h
@@ -53,7 +53,7 @@ class ShardType;
class ShardRegistryData {
public:
- ShardRegistryData(OperationContext* txn, ShardFactory* shardFactory);
+ ShardRegistryData(OperationContext* opCtx, ShardFactory* shardFactory);
ShardRegistryData() = default;
~ShardRegistryData() = default;
@@ -101,7 +101,7 @@ private:
/**
* Reads shards docs from the catalog client and fills in maps.
*/
- void _init(OperationContext* txn, ShardFactory* factory);
+ void _init(OperationContext* opCtx, ShardFactory* factory);
/**
* Creates a shard based on the specified information and puts it into the lookup maps.
@@ -171,7 +171,7 @@ public:
* reloading is required, the caller should call this method one more time if the first call
* returned false.
*/
- bool reload(OperationContext* txn);
+ bool reload(OperationContext* opCtx);
/**
* Takes a connection string describing either a shard or config server replica set, looks
@@ -188,7 +188,7 @@ public:
* parameter can actually be the shard name or the HostAndPort for any
* server in the shard.
*/
- StatusWith<std::shared_ptr<Shard>> getShard(OperationContext* txn, const ShardId& shardId);
+ StatusWith<std::shared_ptr<Shard>> getShard(OperationContext* opCtx, const ShardId& shardId);
/**
* Returns a shared pointer to the shard object with the given shard id. The shardId parameter
diff --git a/src/mongo/s/client/shard_remote.cpp b/src/mongo/s/client/shard_remote.cpp
index dea32639d84..3be06bd6ca7 100644
--- a/src/mongo/s/client/shard_remote.cpp
+++ b/src/mongo/s/client/shard_remote.cpp
@@ -141,22 +141,22 @@ std::string ShardRemote::toString() const {
return getId().toString() + ":" + _originalConnString.toString();
}
-BSONObj ShardRemote::_appendMetadataForCommand(OperationContext* txn,
+BSONObj ShardRemote::_appendMetadataForCommand(OperationContext* opCtx,
const ReadPreferenceSetting& readPref) {
BSONObjBuilder builder;
if (logger::globalLogDomain()->shouldLog(
logger::LogComponent::kTracking,
logger::LogSeverity::Debug(1))) { // avoid performance overhead if not logging
- if (!TrackingMetadata::get(txn).getIsLogged()) {
- if (!TrackingMetadata::get(txn).getOperId()) {
- TrackingMetadata::get(txn).initWithOperName("NotSet");
+ if (!TrackingMetadata::get(opCtx).getIsLogged()) {
+ if (!TrackingMetadata::get(opCtx).getOperId()) {
+ TrackingMetadata::get(opCtx).initWithOperName("NotSet");
}
MONGO_LOG_COMPONENT(1, logger::LogComponent::kTracking)
- << TrackingMetadata::get(txn).toString();
- TrackingMetadata::get(txn).setIsLogged(true);
+ << TrackingMetadata::get(opCtx).toString();
+ TrackingMetadata::get(opCtx).setIsLogged(true);
}
- TrackingMetadata metadata = TrackingMetadata::get(txn).constructChildMetadata();
+ TrackingMetadata metadata = TrackingMetadata::get(opCtx).constructChildMetadata();
metadata.writeToMetadata(&builder);
}
@@ -175,7 +175,7 @@ BSONObj ShardRemote::_appendMetadataForCommand(OperationContext* txn,
return builder.obj();
}
-Shard::HostWithResponse ShardRemote::_runCommand(OperationContext* txn,
+Shard::HostWithResponse ShardRemote::_runCommand(OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const string& dbName,
Milliseconds maxTimeMSOverride,
@@ -185,26 +185,26 @@ Shard::HostWithResponse ShardRemote::_runCommand(OperationContext* txn,
if (getId() == "config") {
readPrefWithMinOpTime.minOpTime = grid.configOpTime();
}
- const auto host = _targeter->findHost(txn, readPrefWithMinOpTime);
+ const auto host = _targeter->findHost(opCtx, readPrefWithMinOpTime);
if (!host.isOK()) {
return Shard::HostWithResponse(boost::none, host.getStatus());
}
const Milliseconds requestTimeout =
- std::min(txn->getRemainingMaxTimeMillis(), maxTimeMSOverride);
+ std::min(opCtx->getRemainingMaxTimeMillis(), maxTimeMSOverride);
const RemoteCommandRequest request(
host.getValue(),
dbName,
appendMaxTimeToCmdObj(requestTimeout, cmdObj),
- _appendMetadataForCommand(txn, readPrefWithMinOpTime),
- txn,
+ _appendMetadataForCommand(opCtx, readPrefWithMinOpTime),
+ opCtx,
requestTimeout < Milliseconds::max() ? requestTimeout : RemoteCommandRequest::kNoTimeout);
RemoteCommandResponse swResponse =
Status(ErrorCodes::InternalError, "Internal error running command");
- TaskExecutor* executor = Grid::get(txn)->getExecutorPool()->getFixedExecutor();
+ TaskExecutor* executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor();
auto callStatus = executor->scheduleRemoteCommand(
request,
[&swResponse](const RemoteCommandCallbackArgs& args) { swResponse = args.response; });
@@ -241,7 +241,7 @@ Shard::HostWithResponse ShardRemote::_runCommand(OperationContext* txn,
}
StatusWith<Shard::QueryResponse> ShardRemote::_exhaustiveFindOnConfig(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const repl::ReadConcernLevel& readConcernLevel,
const NamespaceString& nss,
@@ -252,7 +252,7 @@ StatusWith<Shard::QueryResponse> ShardRemote::_exhaustiveFindOnConfig(
ReadPreferenceSetting readPrefWithMinOpTime(readPref);
readPrefWithMinOpTime.minOpTime = grid.configOpTime();
- const auto host = _targeter->findHost(txn, readPrefWithMinOpTime);
+ const auto host = _targeter->findHost(opCtx, readPrefWithMinOpTime);
if (!host.isOK()) {
return host.getStatus();
}
@@ -313,7 +313,7 @@ StatusWith<Shard::QueryResponse> ShardRemote::_exhaustiveFindOnConfig(
}
const Milliseconds maxTimeMS =
- std::min(txn->getRemainingMaxTimeMillis(), kDefaultConfigCommandTimeout);
+ std::min(opCtx->getRemainingMaxTimeMillis(), kDefaultConfigCommandTimeout);
BSONObjBuilder findCmdBuilder;
@@ -331,12 +331,12 @@ StatusWith<Shard::QueryResponse> ShardRemote::_exhaustiveFindOnConfig(
qr.asFindCommand(&findCmdBuilder);
}
- Fetcher fetcher(Grid::get(txn)->getExecutorPool()->getFixedExecutor(),
+ Fetcher fetcher(Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(),
host.getValue(),
nss.db().toString(),
findCmdBuilder.done(),
fetcherCallback,
- _appendMetadataForCommand(txn, readPrefWithMinOpTime),
+ _appendMetadataForCommand(opCtx, readPrefWithMinOpTime),
maxTimeMS);
Status scheduleStatus = fetcher.schedule();
if (!scheduleStatus.isOK()) {
@@ -357,7 +357,7 @@ StatusWith<Shard::QueryResponse> ShardRemote::_exhaustiveFindOnConfig(
return response;
}
-Status ShardRemote::createIndexOnConfig(OperationContext* txn,
+Status ShardRemote::createIndexOnConfig(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& keys,
bool unique) {
diff --git a/src/mongo/s/client/shard_remote.h b/src/mongo/s/client/shard_remote.h
index 908ebf33eee..c7f6931d77e 100644
--- a/src/mongo/s/client/shard_remote.h
+++ b/src/mongo/s/client/shard_remote.h
@@ -70,7 +70,7 @@ public:
bool isRetriableError(ErrorCodes::Error code, RetryPolicy options) final;
- Status createIndexOnConfig(OperationContext* txn,
+ Status createIndexOnConfig(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& keys,
bool unique) override;
@@ -80,16 +80,17 @@ private:
* Returns the metadata that should be used when running commands against this shard with
* the given read preference.
*/
- BSONObj _appendMetadataForCommand(OperationContext* txn, const ReadPreferenceSetting& readPref);
+ BSONObj _appendMetadataForCommand(OperationContext* opCtx,
+ const ReadPreferenceSetting& readPref);
- Shard::HostWithResponse _runCommand(OperationContext* txn,
+ Shard::HostWithResponse _runCommand(OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const std::string& dbname,
Milliseconds maxTimeMSOverride,
const BSONObj& cmdObj) final;
StatusWith<QueryResponse> _exhaustiveFindOnConfig(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const repl::ReadConcernLevel& readConcernLevel,
const NamespaceString& nss,
diff --git a/src/mongo/s/client/sharding_connection_hook.cpp b/src/mongo/s/client/sharding_connection_hook.cpp
index 7085a290a0c..36cb24bf9fc 100644
--- a/src/mongo/s/client/sharding_connection_hook.cpp
+++ b/src/mongo/s/client/sharding_connection_hook.cpp
@@ -78,9 +78,9 @@ void ShardingConnectionHook::onCreate(DBClientBase* conn) {
});
}
conn->setRequestMetadataWriter(
- [this](OperationContext* txn, BSONObjBuilder* metadataBob, StringData hostStringData) {
+ [this](OperationContext* opCtx, BSONObjBuilder* metadataBob, StringData hostStringData) {
return _egressHook->writeRequestMetadata(
- _shardedConnections, txn, hostStringData, metadataBob);
+ _shardedConnections, opCtx, hostStringData, metadataBob);
});
diff --git a/src/mongo/s/client/version_manager.cpp b/src/mongo/s/client/version_manager.cpp
index 107579eeaab..299a89f5941 100644
--- a/src/mongo/s/client/version_manager.cpp
+++ b/src/mongo/s/client/version_manager.cpp
@@ -107,7 +107,7 @@ private:
/**
* Sends the setShardVersion command on the specified connection.
*/
-bool setShardVersion(OperationContext* txn,
+bool setShardVersion(OperationContext* opCtx,
DBClientBase* conn,
const string& ns,
const ConnectionString& configServer,
@@ -174,7 +174,7 @@ DBClientBase* getVersionable(DBClientBase* conn) {
* Eventually this should go completely away, but for now many commands rely on unversioned but
* mongos-specific behavior on mongod (auditing and replication information in commands)
*/
-bool initShardVersionEmptyNS(OperationContext* txn, DBClientBase* conn_in) {
+bool initShardVersionEmptyNS(OperationContext* opCtx, DBClientBase* conn_in) {
try {
// May throw if replica set primary is down
DBClientBase* const conn = getVersionable(conn_in);
@@ -187,7 +187,7 @@ bool initShardVersionEmptyNS(OperationContext* txn, DBClientBase* conn_in) {
}
BSONObj result;
- const bool ok = setShardVersion(txn,
+ const bool ok = setShardVersion(opCtx,
conn,
"",
grid.shardRegistry()->getConfigServerConnectionString(),
@@ -241,7 +241,7 @@ bool initShardVersionEmptyNS(OperationContext* txn, DBClientBase* conn_in) {
*
* @return true if we contacted the remote host
*/
-bool checkShardVersion(OperationContext* txn,
+bool checkShardVersion(OperationContext* opCtx,
DBClientBase* conn_in,
const string& ns,
shared_ptr<ChunkManager> refManager,
@@ -249,7 +249,7 @@ bool checkShardVersion(OperationContext* txn,
int tryNumber) {
// Empty namespaces are special - we require initialization but not versioning
if (ns.size() == 0) {
- return initShardVersionEmptyNS(txn, conn_in);
+ return initShardVersionEmptyNS(opCtx, conn_in);
}
DBClientBase* const conn = getVersionable(conn_in);
@@ -258,10 +258,10 @@ bool checkShardVersion(OperationContext* txn,
const NamespaceString nss(ns);
if (authoritative) {
- ScopedChunkManager::refreshAndGet(txn, nss);
+ ScopedChunkManager::refreshAndGet(opCtx, nss);
}
- auto scopedCMStatus = ScopedChunkManager::get(txn, nss);
+ auto scopedCMStatus = ScopedChunkManager::get(opCtx, nss);
if (!scopedCMStatus.isOK()) {
return false;
@@ -283,7 +283,7 @@ bool checkShardVersion(OperationContext* txn,
return false;
}
- const auto shardRegistry = Grid::get(txn)->shardRegistry();
+ const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
const auto shard = shardRegistry->getShardForHostNoReload(
uassertStatusOK(HostAndPort::parse(conn->getServerAddress())));
@@ -350,7 +350,7 @@ bool checkShardVersion(OperationContext* txn,
<< ", current chunk manager iteration is " << officialSequenceNumber;
BSONObj result;
- if (setShardVersion(txn,
+ if (setShardVersion(opCtx,
conn,
ns,
shardRegistry->getConfigServerConnectionString(),
@@ -375,7 +375,7 @@ bool checkShardVersion(OperationContext* txn,
if (!authoritative) {
// use the original connection and get a fresh versionable connection
// since conn can be invalidated (or worse, freed) after the failure
- checkShardVersion(txn, conn_in, ns, refManager, 1, tryNumber + 1);
+ checkShardVersion(opCtx, conn_in, ns, refManager, 1, tryNumber + 1);
return true;
}
@@ -384,10 +384,10 @@ bool checkShardVersion(OperationContext* txn,
warning() << "reloading full configuration for " << conf->name()
<< ", connection state indicates significant version changes";
- Grid::get(txn)->catalogCache()->invalidate(nss.db());
+ Grid::get(opCtx)->catalogCache()->invalidate(nss.db());
}
- conf->getChunkManager(txn, nss.ns(), true);
+ conf->getChunkManager(opCtx, nss.ns(), true);
}
const int maxNumTries = 7;
@@ -397,7 +397,7 @@ bool checkShardVersion(OperationContext* txn,
sleepmillis(10 * tryNumber);
// use the original connection and get a fresh versionable connection
// since conn can be invalidated (or worse, freed) after the failure
- checkShardVersion(txn, conn_in, ns, refManager, true, tryNumber + 1);
+ checkShardVersion(opCtx, conn_in, ns, refManager, true, tryNumber + 1);
return true;
}
@@ -426,20 +426,20 @@ bool VersionManager::isVersionableCB(DBClientBase* conn) {
return conn->type() == ConnectionString::MASTER || conn->type() == ConnectionString::SET;
}
-bool VersionManager::checkShardVersionCB(OperationContext* txn,
+bool VersionManager::checkShardVersionCB(OperationContext* opCtx,
DBClientBase* conn_in,
const string& ns,
bool authoritative,
int tryNumber) {
- return checkShardVersion(txn, conn_in, ns, nullptr, authoritative, tryNumber);
+ return checkShardVersion(opCtx, conn_in, ns, nullptr, authoritative, tryNumber);
}
-bool VersionManager::checkShardVersionCB(OperationContext* txn,
+bool VersionManager::checkShardVersionCB(OperationContext* opCtx,
ShardConnection* conn_in,
bool authoritative,
int tryNumber) {
return checkShardVersion(
- txn, conn_in->get(), conn_in->getNS(), conn_in->getManager(), authoritative, tryNumber);
+ opCtx, conn_in->get(), conn_in->getNS(), conn_in->getManager(), authoritative, tryNumber);
}
} // namespace mongo
diff --git a/src/mongo/s/cluster_identity_loader.cpp b/src/mongo/s/cluster_identity_loader.cpp
index 741a280ab4c..37a63a5cee2 100644
--- a/src/mongo/s/cluster_identity_loader.cpp
+++ b/src/mongo/s/cluster_identity_loader.cpp
@@ -60,7 +60,7 @@ OID ClusterIdentityLoader::getClusterId() {
return _lastLoadResult.getValue();
}
-Status ClusterIdentityLoader::loadClusterId(OperationContext* txn,
+Status ClusterIdentityLoader::loadClusterId(OperationContext* opCtx,
const repl::ReadConcernLevel& readConcernLevel) {
stdx::unique_lock<stdx::mutex> lk(_mutex);
if (_initializationState == InitializationState::kInitialized) {
@@ -79,7 +79,7 @@ Status ClusterIdentityLoader::loadClusterId(OperationContext* txn,
_initializationState = InitializationState::kLoading;
lk.unlock();
- auto loadStatus = _fetchClusterIdFromConfig(txn, readConcernLevel);
+ auto loadStatus = _fetchClusterIdFromConfig(opCtx, readConcernLevel);
lk.lock();
invariant(_initializationState == InitializationState::kLoading);
@@ -94,9 +94,9 @@ Status ClusterIdentityLoader::loadClusterId(OperationContext* txn,
}
StatusWith<OID> ClusterIdentityLoader::_fetchClusterIdFromConfig(
- OperationContext* txn, const repl::ReadConcernLevel& readConcernLevel) {
- auto catalogClient = Grid::get(txn)->catalogClient(txn);
- auto loadResult = catalogClient->getConfigVersion(txn, readConcernLevel);
+ OperationContext* opCtx, const repl::ReadConcernLevel& readConcernLevel) {
+ auto catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
+ auto loadResult = catalogClient->getConfigVersion(opCtx, readConcernLevel);
if (!loadResult.isOK()) {
return Status(loadResult.getStatus().code(),
str::stream() << "Error loading clusterID"
diff --git a/src/mongo/s/cluster_identity_loader.h b/src/mongo/s/cluster_identity_loader.h
index d34f5368850..c8871cb1c56 100644
--- a/src/mongo/s/cluster_identity_loader.h
+++ b/src/mongo/s/cluster_identity_loader.h
@@ -71,7 +71,7 @@ public:
* If another thread is already in the process of loading the cluster ID, concurrent calls will
* wait for that thread to finish and then return its results.
*/
- Status loadClusterId(OperationContext* txn, const repl::ReadConcernLevel& readConcernLevel);
+ Status loadClusterId(OperationContext* opCtx, const repl::ReadConcernLevel& readConcernLevel);
/**
* Called if the config.version document is rolled back. Notifies the ClusterIdentityLoader
@@ -90,7 +90,7 @@ private:
* Queries the config.version collection on the config server, extracts the cluster ID from
* the version document, and returns it.
*/
- StatusWith<OID> _fetchClusterIdFromConfig(OperationContext* txn,
+ StatusWith<OID> _fetchClusterIdFromConfig(OperationContext* opCtx,
const repl::ReadConcernLevel& readConcernLevel);
stdx::mutex _mutex;
diff --git a/src/mongo/s/commands/chunk_manager_targeter.cpp b/src/mongo/s/commands/chunk_manager_targeter.cpp
index 9dfef97dfda..8296b0066a9 100644
--- a/src/mongo/s/commands/chunk_manager_targeter.cpp
+++ b/src/mongo/s/commands/chunk_manager_targeter.cpp
@@ -117,7 +117,7 @@ UpdateType getUpdateExprType(const BSONObj& updateExpr) {
* { _id : { $lt : 30 } } => false
* { foo : <anything> } => false
*/
-bool isExactIdQuery(OperationContext* txn, const CanonicalQuery& query, ChunkManager* manager) {
+bool isExactIdQuery(OperationContext* opCtx, const CanonicalQuery& query, ChunkManager* manager) {
auto shardKey = virtualIdShardKey.extractShardKeyFromQuery(query);
BSONElement idElt = shardKey["_id"];
@@ -288,8 +288,8 @@ ChunkManagerTargeter::ChunkManagerTargeter(const NamespaceString& nss, TargeterS
: _nss(nss), _needsTargetingRefresh(false), _stats(stats) {}
-Status ChunkManagerTargeter::init(OperationContext* txn) {
- auto scopedCMStatus = ScopedChunkManager::getOrCreate(txn, _nss);
+Status ChunkManagerTargeter::init(OperationContext* opCtx) {
+ auto scopedCMStatus = ScopedChunkManager::getOrCreate(opCtx, _nss);
if (!scopedCMStatus.isOK()) {
return scopedCMStatus.getStatus();
}
@@ -305,7 +305,7 @@ const NamespaceString& ChunkManagerTargeter::getNS() const {
return _nss;
}
-Status ChunkManagerTargeter::targetInsert(OperationContext* txn,
+Status ChunkManagerTargeter::targetInsert(OperationContext* opCtx,
const BSONObj& doc,
ShardEndpoint** endpoint) const {
BSONObj shardKey;
@@ -349,7 +349,7 @@ Status ChunkManagerTargeter::targetInsert(OperationContext* txn,
return Status::OK();
}
-Status ChunkManagerTargeter::targetUpdate(OperationContext* txn,
+Status ChunkManagerTargeter::targetUpdate(OperationContext* opCtx,
const BatchedUpdateDocument& updateDoc,
vector<ShardEndpoint*>* endpoints) const {
//
@@ -393,7 +393,7 @@ Status ChunkManagerTargeter::targetUpdate(OperationContext* txn,
if (updateType == UpdateType_OpStyle) {
// Target using the query
StatusWith<BSONObj> status =
- _manager->getShardKeyPattern().extractShardKeyFromQuery(txn, query);
+ _manager->getShardKeyPattern().extractShardKeyFromQuery(opCtx, query);
// Bad query
if (!status.isOK())
@@ -445,7 +445,7 @@ Status ChunkManagerTargeter::targetUpdate(OperationContext* txn,
if (!collation.isEmpty()) {
qr->setCollation(collation);
}
- auto cq = CanonicalQuery::canonicalize(txn, std::move(qr), ExtensionsCallbackNoop());
+ auto cq = CanonicalQuery::canonicalize(opCtx, std::move(qr), ExtensionsCallbackNoop());
if (!cq.isOK()) {
return Status(cq.getStatus().code(),
str::stream() << "Could not parse update query " << updateDoc.getQuery()
@@ -453,7 +453,8 @@ Status ChunkManagerTargeter::targetUpdate(OperationContext* txn,
}
// Single (non-multi) updates must target a single shard or be exact-ID.
- if (_manager && !updateDoc.getMulti() && !isExactIdQuery(txn, *cq.getValue(), _manager.get())) {
+ if (_manager && !updateDoc.getMulti() &&
+ !isExactIdQuery(opCtx, *cq.getValue(), _manager.get())) {
return Status(ErrorCodes::ShardKeyNotFound,
str::stream()
<< "A single update on a sharded collection must contain an exact "
@@ -466,13 +467,13 @@ Status ChunkManagerTargeter::targetUpdate(OperationContext* txn,
}
if (updateType == UpdateType_OpStyle) {
- return targetQuery(txn, query, collation, endpoints);
+ return targetQuery(opCtx, query, collation, endpoints);
} else {
- return targetDoc(txn, updateExpr, collation, endpoints);
+ return targetDoc(opCtx, updateExpr, collation, endpoints);
}
}
-Status ChunkManagerTargeter::targetDelete(OperationContext* txn,
+Status ChunkManagerTargeter::targetDelete(OperationContext* opCtx,
const BatchedDeleteDocument& deleteDoc,
vector<ShardEndpoint*>* endpoints) const {
BSONObj shardKey;
@@ -486,7 +487,7 @@ Status ChunkManagerTargeter::targetDelete(OperationContext* txn,
// Get the shard key
StatusWith<BSONObj> status =
- _manager->getShardKeyPattern().extractShardKeyFromQuery(txn, deleteDoc.getQuery());
+ _manager->getShardKeyPattern().extractShardKeyFromQuery(opCtx, deleteDoc.getQuery());
// Bad query
if (!status.isOK())
@@ -516,7 +517,7 @@ Status ChunkManagerTargeter::targetDelete(OperationContext* txn,
if (!collation.isEmpty()) {
qr->setCollation(collation);
}
- auto cq = CanonicalQuery::canonicalize(txn, std::move(qr), ExtensionsCallbackNoop());
+ auto cq = CanonicalQuery::canonicalize(opCtx, std::move(qr), ExtensionsCallbackNoop());
if (!cq.isOK()) {
return Status(cq.getStatus().code(),
str::stream() << "Could not parse delete query " << deleteDoc.getQuery()
@@ -525,7 +526,7 @@ Status ChunkManagerTargeter::targetDelete(OperationContext* txn,
// Single deletes must target a single shard or be exact-ID.
if (_manager && deleteDoc.getLimit() == 1 &&
- !isExactIdQuery(txn, *cq.getValue(), _manager.get())) {
+ !isExactIdQuery(opCtx, *cq.getValue(), _manager.get())) {
return Status(ErrorCodes::ShardKeyNotFound,
str::stream()
<< "A single delete on a sharded collection must contain an exact "
@@ -537,19 +538,19 @@ Status ChunkManagerTargeter::targetDelete(OperationContext* txn,
<< _manager->getShardKeyPattern().toString());
}
- return targetQuery(txn, deleteDoc.getQuery(), collation, endpoints);
+ return targetQuery(opCtx, deleteDoc.getQuery(), collation, endpoints);
}
-Status ChunkManagerTargeter::targetDoc(OperationContext* txn,
+Status ChunkManagerTargeter::targetDoc(OperationContext* opCtx,
const BSONObj& doc,
const BSONObj& collation,
vector<ShardEndpoint*>* endpoints) const {
// NOTE: This is weird and fragile, but it's the way our language works right now -
// documents are either A) invalid or B) valid equality queries over themselves.
- return targetQuery(txn, doc, collation, endpoints);
+ return targetQuery(opCtx, doc, collation, endpoints);
}
-Status ChunkManagerTargeter::targetQuery(OperationContext* txn,
+Status ChunkManagerTargeter::targetQuery(OperationContext* opCtx,
const BSONObj& query,
const BSONObj& collation,
vector<ShardEndpoint*>* endpoints) const {
@@ -562,7 +563,7 @@ Status ChunkManagerTargeter::targetQuery(OperationContext* txn,
set<ShardId> shardIds;
if (_manager) {
try {
- _manager->getShardIdsForQuery(txn, query, collation, &shardIds);
+ _manager->getShardIdsForQuery(opCtx, query, collation, &shardIds);
} catch (const DBException& ex) {
return ex.toStatus();
}
@@ -671,7 +672,7 @@ void ChunkManagerTargeter::noteCouldNotTarget() {
_needsTargetingRefresh = true;
}
-Status ChunkManagerTargeter::refreshIfNeeded(OperationContext* txn, bool* wasChanged) {
+Status ChunkManagerTargeter::refreshIfNeeded(OperationContext* opCtx, bool* wasChanged) {
bool dummy;
if (!wasChanged) {
wasChanged = &dummy;
@@ -694,7 +695,7 @@ Status ChunkManagerTargeter::refreshIfNeeded(OperationContext* txn, bool* wasCha
shared_ptr<ChunkManager> lastManager = _manager;
shared_ptr<Shard> lastPrimary = _primary;
- auto scopedCMStatus = ScopedChunkManager::getOrCreate(txn, _nss);
+ auto scopedCMStatus = ScopedChunkManager::getOrCreate(opCtx, _nss);
if (!scopedCMStatus.isOK()) {
return scopedCMStatus.getStatus();
}
@@ -724,7 +725,7 @@ Status ChunkManagerTargeter::refreshIfNeeded(OperationContext* txn, bool* wasCha
// If didn't already refresh the targeting information, refresh it
if (!alreadyRefreshed) {
// To match previous behavior, we just need an incremental refresh here
- return refreshNow(txn, RefreshType_RefreshChunkManager);
+ return refreshNow(opCtx, RefreshType_RefreshChunkManager);
}
*wasChanged = isMetadataDifferent(lastManager, lastPrimary, _manager, _primary);
@@ -740,10 +741,10 @@ Status ChunkManagerTargeter::refreshIfNeeded(OperationContext* txn, bool* wasCha
if (result == CompareResult_Unknown) {
// Our current shard versions aren't all comparable to the old versions, maybe drop
- return refreshNow(txn, RefreshType_ReloadDatabase);
+ return refreshNow(opCtx, RefreshType_ReloadDatabase);
} else if (result == CompareResult_LT) {
// Our current shard versions are less than the remote versions, but no drop
- return refreshNow(txn, RefreshType_RefreshChunkManager);
+ return refreshNow(opCtx, RefreshType_RefreshChunkManager);
}
*wasChanged = isMetadataDifferent(lastManager, lastPrimary, _manager, _primary);
@@ -755,17 +756,17 @@ Status ChunkManagerTargeter::refreshIfNeeded(OperationContext* txn, bool* wasCha
return Status::OK();
}
-Status ChunkManagerTargeter::refreshNow(OperationContext* txn, RefreshType refreshType) {
+Status ChunkManagerTargeter::refreshNow(OperationContext* opCtx, RefreshType refreshType) {
if (refreshType == RefreshType_ReloadDatabase) {
- Grid::get(txn)->catalogCache()->invalidate(_nss.db().toString());
+ Grid::get(opCtx)->catalogCache()->invalidate(_nss.db().toString());
}
// Try not to spam the configs
refreshBackoff();
- ScopedChunkManager::refreshAndGet(txn, _nss);
+ ScopedChunkManager::refreshAndGet(opCtx, _nss);
- auto scopedCMStatus = ScopedChunkManager::get(txn, _nss);
+ auto scopedCMStatus = ScopedChunkManager::get(opCtx, _nss);
if (!scopedCMStatus.isOK()) {
return scopedCMStatus.getStatus();
}
diff --git a/src/mongo/s/commands/chunk_manager_targeter.h b/src/mongo/s/commands/chunk_manager_targeter.h
index 9c8f136dad8..049cdb8d858 100644
--- a/src/mongo/s/commands/chunk_manager_targeter.h
+++ b/src/mongo/s/commands/chunk_manager_targeter.h
@@ -69,20 +69,22 @@ public:
*
* Returns !OK if the information could not be initialized.
*/
- Status init(OperationContext* txn);
+ Status init(OperationContext* opCtx);
const NamespaceString& getNS() const;
// Returns ShardKeyNotFound if document does not have a full shard key.
- Status targetInsert(OperationContext* txn, const BSONObj& doc, ShardEndpoint** endpoint) const;
+ Status targetInsert(OperationContext* opCtx,
+ const BSONObj& doc,
+ ShardEndpoint** endpoint) const;
// Returns ShardKeyNotFound if the update can't be targeted without a shard key.
- Status targetUpdate(OperationContext* txn,
+ Status targetUpdate(OperationContext* opCtx,
const BatchedUpdateDocument& updateDoc,
std::vector<ShardEndpoint*>* endpoints) const;
// Returns ShardKeyNotFound if the delete can't be targeted without a shard key.
- Status targetDelete(OperationContext* txn,
+ Status targetDelete(OperationContext* opCtx,
const BatchedDeleteDocument& deleteDoc,
std::vector<ShardEndpoint*>* endpoints) const;
@@ -103,7 +105,7 @@ public:
*
* Also see NSTargeter::refreshIfNeeded().
*/
- Status refreshIfNeeded(OperationContext* txn, bool* wasChanged);
+ Status refreshIfNeeded(OperationContext* opCtx, bool* wasChanged);
private:
// Different ways we can refresh metadata
@@ -120,7 +122,7 @@ private:
/**
* Performs an actual refresh from the config server.
*/
- Status refreshNow(OperationContext* txn, RefreshType refreshType);
+ Status refreshNow(OperationContext* opCtx, RefreshType refreshType);
/**
* Returns a vector of ShardEndpoints where a document might need to be placed.
@@ -129,7 +131,7 @@ private:
*
* If 'collation' is empty, we use the collection default collation for targeting.
*/
- Status targetDoc(OperationContext* txn,
+ Status targetDoc(OperationContext* opCtx,
const BSONObj& doc,
const BSONObj& collation,
std::vector<ShardEndpoint*>* endpoints) const;
@@ -141,7 +143,7 @@ private:
*
* If 'collation' is empty, we use the collection default collation for targeting.
*/
- Status targetQuery(OperationContext* txn,
+ Status targetQuery(OperationContext* opCtx,
const BSONObj& query,
const BSONObj& collation,
std::vector<ShardEndpoint*>* endpoints) const;
diff --git a/src/mongo/s/commands/cluster_add_shard_cmd.cpp b/src/mongo/s/commands/cluster_add_shard_cmd.cpp
index e2cb0e4f117..c54804992d2 100644
--- a/src/mongo/s/commands/cluster_add_shard_cmd.cpp
+++ b/src/mongo/s/commands/cluster_add_shard_cmd.cpp
@@ -80,7 +80,7 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -88,9 +88,9 @@ public:
BSONObjBuilder& result) {
auto parsedRequest = uassertStatusOK(AddShardRequest::parseFromMongosCommand(cmdObj));
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto cmdResponseStatus = uassertStatusOK(
- configShard->runCommandWithFixedRetryAttempts(txn,
+ configShard->runCommandWithFixedRetryAttempts(opCtx,
kPrimaryOnlyReadPreference,
"admin",
parsedRequest.toCommandForConfig(),
@@ -103,8 +103,8 @@ public:
result << "shardAdded" << shardAdded;
// Ensure the added shard is visible to this process.
- auto shardRegistry = Grid::get(txn)->shardRegistry();
- if (!shardRegistry->getShard(txn, shardAdded).isOK()) {
+ auto shardRegistry = Grid::get(opCtx)->shardRegistry();
+ if (!shardRegistry->getShard(opCtx, shardAdded).isOK()) {
return appendCommandStatus(result,
{ErrorCodes::OperationFailed,
"Could not find shard metadata for shard after adding it. "
diff --git a/src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp b/src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp
index 6ab4e01cd54..43f2b4a3539 100644
--- a/src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp
+++ b/src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp
@@ -96,7 +96,7 @@ public:
return Status::OK();
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -108,9 +108,9 @@ public:
parsedRequest.appendAsConfigCommand(&cmdBuilder);
cmdBuilder.append("writeConcern", kMajorityWriteConcern.toBSON());
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto cmdResponseStatus = uassertStatusOK(
- configShard->runCommandWithFixedRetryAttempts(txn,
+ configShard->runCommandWithFixedRetryAttempts(opCtx,
kPrimaryOnlyReadPreference,
"admin",
cmdBuilder.obj(),
diff --git a/src/mongo/s/commands/cluster_aggregate.cpp b/src/mongo/s/commands/cluster_aggregate.cpp
index 07bf5dcb5c9..a6887ea0498 100644
--- a/src/mongo/s/commands/cluster_aggregate.cpp
+++ b/src/mongo/s/commands/cluster_aggregate.cpp
@@ -61,12 +61,13 @@
namespace mongo {
-Status ClusterAggregate::runAggregate(OperationContext* txn,
+Status ClusterAggregate::runAggregate(OperationContext* opCtx,
const Namespaces& namespaces,
BSONObj cmdObj,
int options,
BSONObjBuilder* result) {
- auto scopedShardDbStatus = ScopedShardDatabase::getExisting(txn, namespaces.executionNss.db());
+ auto scopedShardDbStatus =
+ ScopedShardDatabase::getExisting(opCtx, namespaces.executionNss.db());
if (!scopedShardDbStatus.isOK()) {
appendEmptyResultSet(
*result, scopedShardDbStatus.getStatus(), namespaces.requestedNss.ns());
@@ -96,21 +97,21 @@ Status ClusterAggregate::runAggregate(OperationContext* txn,
}
if (!conf->isSharded(namespaces.executionNss.ns())) {
- return aggPassthrough(txn, namespaces, conf, cmdObj, result, options);
+ return aggPassthrough(opCtx, namespaces, conf, cmdObj, result, options);
}
- auto chunkMgr = conf->getChunkManager(txn, namespaces.executionNss.ns());
+ auto chunkMgr = conf->getChunkManager(opCtx, namespaces.executionNss.ns());
std::unique_ptr<CollatorInterface> collation;
if (!request.getValue().getCollation().isEmpty()) {
- collation = uassertStatusOK(CollatorFactoryInterface::get(txn->getServiceContext())
+ collation = uassertStatusOK(CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(request.getValue().getCollation()));
} else if (chunkMgr->getDefaultCollator()) {
collation = chunkMgr->getDefaultCollator()->clone();
}
boost::intrusive_ptr<ExpressionContext> mergeCtx = new ExpressionContext(
- txn, request.getValue(), std::move(collation), std::move(resolvedNamespaces));
+ opCtx, request.getValue(), std::move(collation), std::move(resolvedNamespaces));
mergeCtx->inRouter = true;
// explicitly *not* setting mergeCtx->tempDir
@@ -127,7 +128,7 @@ Status ClusterAggregate::runAggregate(OperationContext* txn,
const bool singleShard = [&]() {
BSONObj firstMatchQuery = pipeline.getValue()->getInitialQuery();
BSONObj shardKeyMatches = uassertStatusOK(
- chunkMgr->getShardKeyPattern().extractShardKeyFromQuery(txn, firstMatchQuery));
+ chunkMgr->getShardKeyPattern().extractShardKeyFromQuery(opCtx, firstMatchQuery));
if (shardKeyMatches.isEmpty()) {
return false;
@@ -176,7 +177,7 @@ Status ClusterAggregate::runAggregate(OperationContext* txn,
// Run the command on the shards
// TODO need to make sure cursors are killed if a retry is needed
std::vector<Strategy::CommandResult> shardResults;
- Strategy::commandOp(txn,
+ Strategy::commandOp(opCtx,
namespaces.executionNss.db().toString(),
shardedCommand,
options,
@@ -210,14 +211,14 @@ Status ClusterAggregate::runAggregate(OperationContext* txn,
if (!needSplit) {
invariant(shardResults.size() == 1);
invariant(shardResults[0].target.getServers().size() == 1);
- auto executorPool = Grid::get(txn)->getExecutorPool();
+ auto executorPool = Grid::get(opCtx)->getExecutorPool();
const BSONObj reply =
- uassertStatusOK(storePossibleCursor(txn,
+ uassertStatusOK(storePossibleCursor(opCtx,
shardResults[0].target.getServers()[0],
shardResults[0].result,
namespaces.requestedNss,
executorPool->getArbitraryExecutor(),
- Grid::get(txn)->getCursorManager()));
+ Grid::get(opCtx)->getCursorManager()));
result->appendElements(reply);
return getStatusFromCommandResult(reply);
}
@@ -258,17 +259,17 @@ Status ClusterAggregate::runAggregate(OperationContext* txn,
// Run merging command on random shard, unless a stage needs the primary shard. Need to use
// ShardConnection so that the merging mongod is sent the config servers on connection init.
- auto& prng = txn->getClient()->getPrng();
+ auto& prng = opCtx->getClient()->getPrng();
const auto& mergingShardId =
(needPrimaryShardMerger || internalQueryAlwaysMergeOnPrimaryShard.load())
? conf->getPrimaryId()
: shardResults[prng.nextInt32(shardResults.size())].shardTargetId;
const auto mergingShard =
- uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, mergingShardId));
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, mergingShardId));
ShardConnection conn(mergingShard->getConnString(), outputNsOrEmpty);
BSONObj mergedResults =
- aggRunCommand(txn, conn.get(), namespaces, mergeCmd.freeze().toBson(), options);
+ aggRunCommand(opCtx, conn.get(), namespaces, mergeCmd.freeze().toBson(), options);
conn.done();
if (auto wcErrorElem = mergedResults["writeConcernError"]) {
@@ -385,7 +386,7 @@ void ClusterAggregate::killAllCursors(const std::vector<Strategy::CommandResult>
}
}
-BSONObj ClusterAggregate::aggRunCommand(OperationContext* txn,
+BSONObj ClusterAggregate::aggRunCommand(OperationContext* opCtx,
DBClientBase* conn,
const Namespaces& namespaces,
BSONObj cmd,
@@ -413,30 +414,30 @@ BSONObj ClusterAggregate::aggRunCommand(OperationContext* txn,
throw RecvStaleConfigException("command failed because of stale config", result);
}
- auto executorPool = Grid::get(txn)->getExecutorPool();
- result = uassertStatusOK(storePossibleCursor(txn,
+ auto executorPool = Grid::get(opCtx)->getExecutorPool();
+ result = uassertStatusOK(storePossibleCursor(opCtx,
HostAndPort(cursor->originalHost()),
result,
namespaces.requestedNss,
executorPool->getArbitraryExecutor(),
- Grid::get(txn)->getCursorManager()));
+ Grid::get(opCtx)->getCursorManager()));
return result;
}
-Status ClusterAggregate::aggPassthrough(OperationContext* txn,
+Status ClusterAggregate::aggPassthrough(OperationContext* opCtx,
const Namespaces& namespaces,
DBConfig* conf,
BSONObj cmdObj,
BSONObjBuilder* out,
int queryOptions) {
// Temporary hack. See comment on declaration for details.
- auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, conf->getPrimaryId());
+ auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, conf->getPrimaryId());
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
ShardConnection conn(shardStatus.getValue()->getConnString(), "");
- BSONObj result = aggRunCommand(txn, conn.get(), namespaces, cmdObj, queryOptions);
+ BSONObj result = aggRunCommand(opCtx, conn.get(), namespaces, cmdObj, queryOptions);
conn.done();
// First append the properly constructed writeConcernError. It will then be skipped
@@ -472,7 +473,8 @@ Status ClusterAggregate::aggPassthrough(OperationContext* txn,
Namespaces nsStruct;
nsStruct.requestedNss = namespaces.requestedNss;
nsStruct.executionNss = resolvedView.getNamespace();
- return ClusterAggregate::runAggregate(txn, nsStruct, aggCmd.getValue(), queryOptions, out);
+ return ClusterAggregate::runAggregate(
+ opCtx, nsStruct, aggCmd.getValue(), queryOptions, out);
}
return getStatusFromCommandResult(result);
diff --git a/src/mongo/s/commands/cluster_aggregate.h b/src/mongo/s/commands/cluster_aggregate.h
index da8e7bb46bc..b0fdd5d7375 100644
--- a/src/mongo/s/commands/cluster_aggregate.h
+++ b/src/mongo/s/commands/cluster_aggregate.h
@@ -63,7 +63,7 @@ public:
* Executes an aggregation command. 'cmdObj' specifies the aggregation to run. Fills in 'result'
* with the command response.
*/
- static Status runAggregate(OperationContext* txn,
+ static Status runAggregate(OperationContext* opCtx,
const Namespaces& namespaces,
BSONObj cmdObj,
int options,
@@ -82,13 +82,13 @@ private:
// could be different from conn->getServerAddress() for connections that map to
// multiple servers such as for replica sets. These also take care of registering
// returned cursors.
- static BSONObj aggRunCommand(OperationContext* txn,
+ static BSONObj aggRunCommand(OperationContext* opCtx,
DBClientBase* conn,
const Namespaces& namespaces,
BSONObj cmd,
int queryOptions);
- static Status aggPassthrough(OperationContext* txn,
+ static Status aggPassthrough(OperationContext* opCtx,
const Namespaces& namespaces,
DBConfig* conf,
BSONObj cmd,
diff --git a/src/mongo/s/commands/cluster_apply_ops_cmd.cpp b/src/mongo/s/commands/cluster_apply_ops_cmd.cpp
index c8928784422..a0fc658a5dd 100644
--- a/src/mongo/s/commands/cluster_apply_ops_cmd.cpp
+++ b/src/mongo/s/commands/cluster_apply_ops_cmd.cpp
@@ -50,13 +50,13 @@ public:
return true;
}
- Status checkAuthForOperation(OperationContext* txn,
+ Status checkAuthForOperation(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj) override {
- return checkAuthForApplyOpsCommand(txn, dbname, cmdObj);
+ return checkAuthForApplyOpsCommand(opCtx, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbName,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/cluster_available_query_options_cmd.cpp b/src/mongo/s/commands/cluster_available_query_options_cmd.cpp
index 0eed4f14c9b..1dcffe67fb4 100644
--- a/src/mongo/s/commands/cluster_available_query_options_cmd.cpp
+++ b/src/mongo/s/commands/cluster_available_query_options_cmd.cpp
@@ -53,7 +53,7 @@ public:
return Status::OK();
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/cluster_commands_common.cpp b/src/mongo/s/commands/cluster_commands_common.cpp
index b4035da68c9..3ad8c373b9a 100644
--- a/src/mongo/s/commands/cluster_commands_common.cpp
+++ b/src/mongo/s/commands/cluster_commands_common.cpp
@@ -51,13 +51,13 @@ using std::string;
namespace {
-bool forceRemoteCheckShardVersionCB(OperationContext* txn, const string& ns) {
+bool forceRemoteCheckShardVersionCB(OperationContext* opCtx, const string& ns) {
const NamespaceString nss(ns);
// This will force the database catalog entry to be reloaded
- Grid::get(txn)->catalogCache()->invalidate(nss.db());
+ Grid::get(opCtx)->catalogCache()->invalidate(nss.db());
- auto scopedCMStatus = ScopedChunkManager::get(txn, nss);
+ auto scopedCMStatus = ScopedChunkManager::get(opCtx, nss);
if (!scopedCMStatus.isOK()) {
return false;
}
@@ -113,7 +113,7 @@ void Future::CommandResult::init() {
}
}
-bool Future::CommandResult::join(OperationContext* txn, int maxRetries) {
+bool Future::CommandResult::join(OperationContext* opCtx, int maxRetries) {
if (_done) {
return _ok;
}
@@ -155,7 +155,7 @@ bool Future::CommandResult::join(OperationContext* txn, int maxRetries) {
}
if (i >= maxRetries / 2) {
- if (!forceRemoteCheckShardVersionCB(txn, staleNS)) {
+ if (!forceRemoteCheckShardVersionCB(opCtx, staleNS)) {
error() << "Future::spawnCommand (part 2) no config detected"
<< causedBy(redact(e));
throw e;
@@ -169,7 +169,7 @@ bool Future::CommandResult::join(OperationContext* txn, int maxRetries) {
<< "for lazy command " << redact(_cmd) << ", could not refresh "
<< staleNS;
} else {
- versionManager.checkShardVersionCB(txn, _conn, staleNS, false, 1);
+ versionManager.checkShardVersionCB(opCtx, _conn, staleNS, false, 1);
}
LOG(i > 1 ? 0 : 1) << "retrying lazy command" << causedBy(redact(e));
@@ -242,13 +242,13 @@ bool appendEmptyResultSet(BSONObjBuilder& result, Status status, const std::stri
return Command::appendCommandStatus(result, status);
}
-std::vector<NamespaceString> getAllShardedCollectionsForDb(OperationContext* txn,
+std::vector<NamespaceString> getAllShardedCollectionsForDb(OperationContext* opCtx,
StringData dbName) {
const auto dbNameStr = dbName.toString();
std::vector<CollectionType> collectionsOnConfig;
- uassertStatusOK(Grid::get(txn)->catalogClient(txn)->getCollections(
- txn, &dbNameStr, &collectionsOnConfig, nullptr));
+ uassertStatusOK(Grid::get(opCtx)->catalogClient(opCtx)->getCollections(
+ opCtx, &dbNameStr, &collectionsOnConfig, nullptr));
std::vector<NamespaceString> collectionsToReturn;
for (const auto& coll : collectionsOnConfig) {
diff --git a/src/mongo/s/commands/cluster_commands_common.h b/src/mongo/s/commands/cluster_commands_common.h
index 819fd8738ea..7d6465bc400 100644
--- a/src/mongo/s/commands/cluster_commands_common.h
+++ b/src/mongo/s/commands/cluster_commands_common.h
@@ -73,7 +73,7 @@ public:
blocks until command is done
returns ok()
*/
- bool join(OperationContext* txn, int maxRetries = 1);
+ bool join(OperationContext* opCtx, int maxRetries = 1);
private:
CommandResult(const std::string& server,
@@ -137,7 +137,7 @@ bool appendEmptyResultSet(BSONObjBuilder& result, Status status, const std::stri
*
* Throws exception on errors.
*/
-std::vector<NamespaceString> getAllShardedCollectionsForDb(OperationContext* txn,
+std::vector<NamespaceString> getAllShardedCollectionsForDb(OperationContext* opCtx,
StringData dbName);
} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_control_balancer_cmd.cpp b/src/mongo/s/commands/cluster_control_balancer_cmd.cpp
index 39da91dd362..b1dabe7ad8c 100644
--- a/src/mongo/s/commands/cluster_control_balancer_cmd.cpp
+++ b/src/mongo/s/commands/cluster_control_balancer_cmd.cpp
@@ -77,15 +77,15 @@ public:
return Status::OK();
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
std::string& errmsg,
BSONObjBuilder& result) override {
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto cmdResponse = uassertStatusOK(
- configShard->runCommandWithFixedRetryAttempts(txn,
+ configShard->runCommandWithFixedRetryAttempts(opCtx,
kPrimaryOnlyReadPreference,
"admin",
BSON(_configsvrCommandName << 1),
diff --git a/src/mongo/s/commands/cluster_count_cmd.cpp b/src/mongo/s/commands/cluster_count_cmd.cpp
index ec2798bb62e..2fcf11086b9 100644
--- a/src/mongo/s/commands/cluster_count_cmd.cpp
+++ b/src/mongo/s/commands/cluster_count_cmd.cpp
@@ -100,7 +100,7 @@ public:
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -168,7 +168,7 @@ public:
}
vector<Strategy::CommandResult> countResult;
- Strategy::commandOp(txn,
+ Strategy::commandOp(opCtx,
dbname,
countCmdBuilder.done(),
options,
@@ -198,7 +198,7 @@ public:
BSONObjBuilder aggResult;
Command::findCommand("aggregate")
- ->run(txn, dbname, aggCmd.getValue(), options, errmsg, aggResult);
+ ->run(opCtx, dbname, aggCmd.getValue(), options, errmsg, aggResult);
result.resetToEmpty();
ViewResponseFormatter formatter(aggResult.obj());
@@ -247,7 +247,7 @@ public:
return true;
}
- virtual Status explain(OperationContext* txn,
+ virtual Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -285,7 +285,7 @@ public:
Timer timer;
vector<Strategy::CommandResult> shardResults;
- Strategy::commandOp(txn,
+ Strategy::commandOp(opCtx,
dbname,
explainCmdBob.obj(),
options,
@@ -316,7 +316,7 @@ public:
std::string errMsg;
if (Command::findCommand("aggregate")
- ->run(txn, dbname, aggCmd.getValue(), 0, errMsg, *out)) {
+ ->run(opCtx, dbname, aggCmd.getValue(), 0, errMsg, *out)) {
return Status::OK();
}
@@ -326,7 +326,7 @@ public:
const char* mongosStageName = ClusterExplain::getStageNameForReadOp(shardResults, cmdObj);
return ClusterExplain::buildExplainResult(
- txn, shardResults, mongosStageName, millisElapsed, out);
+ opCtx, shardResults, mongosStageName, millisElapsed, out);
}
} clusterCountCmd;
diff --git a/src/mongo/s/commands/cluster_drop_cmd.cpp b/src/mongo/s/commands/cluster_drop_cmd.cpp
index 7f611f543f8..2c44a5a1dbc 100644
--- a/src/mongo/s/commands/cluster_drop_cmd.cpp
+++ b/src/mongo/s/commands/cluster_drop_cmd.cpp
@@ -72,7 +72,7 @@ public:
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -80,7 +80,7 @@ public:
BSONObjBuilder& result) override {
const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
- auto scopedDbStatus = ScopedShardDatabase::getExisting(txn, dbname);
+ auto scopedDbStatus = ScopedShardDatabase::getExisting(opCtx, dbname);
if (scopedDbStatus == ErrorCodes::NamespaceNotFound) {
return true;
}
@@ -90,9 +90,9 @@ public:
auto const db = scopedDbStatus.getValue().db();
if (!db->isSharded(nss.ns())) {
- _dropUnshardedCollectionFromShard(txn, db->getPrimaryId(), nss, &result);
+ _dropUnshardedCollectionFromShard(opCtx, db->getPrimaryId(), nss, &result);
} else {
- uassertStatusOK(Grid::get(txn)->catalogClient(txn)->dropCollection(txn, nss));
+ uassertStatusOK(Grid::get(opCtx)->catalogClient(opCtx)->dropCollection(opCtx, nss));
db->markNSNotSharded(nss.ns());
}
@@ -104,13 +104,13 @@ private:
* Sends the 'drop' command for the specified collection to the specified shard. Throws
* DBException on failure.
*/
- static void _dropUnshardedCollectionFromShard(OperationContext* txn,
+ static void _dropUnshardedCollectionFromShard(OperationContext* opCtx,
const ShardId& shardId,
const NamespaceString& nss,
BSONObjBuilder* result) {
- const auto shardRegistry = Grid::get(txn)->shardRegistry();
+ const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
- const auto dropCommandBSON = [shardRegistry, txn, &shardId, &nss] {
+ const auto dropCommandBSON = [shardRegistry, opCtx, &shardId, &nss] {
BSONObjBuilder builder;
builder.append("drop", nss.coll());
@@ -121,17 +121,17 @@ private:
ChunkVersion::UNSHARDED().appendForCommands(&builder);
}
- if (!txn->getWriteConcern().usedDefault) {
+ if (!opCtx->getWriteConcern().usedDefault) {
builder.append(WriteConcernOptions::kWriteConcernField,
- txn->getWriteConcern().toBSON());
+ opCtx->getWriteConcern().toBSON());
}
return builder.obj();
}();
- const auto shard = uassertStatusOK(shardRegistry->getShard(txn, shardId));
+ const auto shard = uassertStatusOK(shardRegistry->getShard(opCtx, shardId));
auto cmdDropResult = uassertStatusOK(shard->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
nss.db().toString(),
dropCommandBSON,
diff --git a/src/mongo/s/commands/cluster_drop_database_cmd.cpp b/src/mongo/s/commands/cluster_drop_database_cmd.cpp
index f227b4aa89a..f86cf073273 100644
--- a/src/mongo/s/commands/cluster_drop_database_cmd.cpp
+++ b/src/mongo/s/commands/cluster_drop_database_cmd.cpp
@@ -72,7 +72,7 @@ public:
out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -86,17 +86,17 @@ public:
"have to pass 1 as db parameter",
cmdObj.firstElement().isNumber() && cmdObj.firstElement().number() == 1);
- auto const catalogClient = Grid::get(txn)->catalogClient(txn);
+ auto const catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
// Lock the database globally to prevent conflicts with simultaneous database
// creation/modification.
auto scopedDistLock = uassertStatusOK(catalogClient->getDistLockManager()->lock(
- txn, dbname, "dropDatabase", DistLockManager::kDefaultLockTimeout));
+ opCtx, dbname, "dropDatabase", DistLockManager::kDefaultLockTimeout));
// Refresh the database metadata so it kicks off a full reload
- Grid::get(txn)->catalogCache()->invalidate(dbname);
+ Grid::get(opCtx)->catalogCache()->invalidate(dbname);
- auto scopedDbStatus = ScopedShardDatabase::getExisting(txn, dbname);
+ auto scopedDbStatus = ScopedShardDatabase::getExisting(opCtx, dbname);
if (scopedDbStatus == ErrorCodes::NamespaceNotFound) {
result.append("info", "database does not exist");
@@ -105,7 +105,7 @@ public:
uassertStatusOK(scopedDbStatus.getStatus());
- catalogClient->logChange(txn,
+ catalogClient->logChange(opCtx,
"dropDatabase.start",
dbname,
BSONObj(),
@@ -114,27 +114,27 @@ public:
auto const db = scopedDbStatus.getValue().db();
// Drop the database's collections from metadata
- for (const auto& nss : getAllShardedCollectionsForDb(txn, dbname)) {
- uassertStatusOK(catalogClient->dropCollection(txn, nss));
+ for (const auto& nss : getAllShardedCollectionsForDb(opCtx, dbname)) {
+ uassertStatusOK(catalogClient->dropCollection(opCtx, nss));
db->markNSNotSharded(nss.ns());
}
// Drop the database from the primary shard first
- _dropDatabaseFromShard(txn, db->getPrimaryId(), dbname);
+ _dropDatabaseFromShard(opCtx, db->getPrimaryId(), dbname);
// Drop the database from each of the remaining shards
{
std::vector<ShardId> allShardIds;
- Grid::get(txn)->shardRegistry()->getAllShardIds(&allShardIds);
+ Grid::get(opCtx)->shardRegistry()->getAllShardIds(&allShardIds);
for (const ShardId& shardId : allShardIds) {
- _dropDatabaseFromShard(txn, shardId, dbname);
+ _dropDatabaseFromShard(opCtx, shardId, dbname);
}
}
// Remove the database entry from the metadata
Status status =
- catalogClient->removeConfigDocuments(txn,
+ catalogClient->removeConfigDocuments(opCtx,
DatabaseType::ConfigNS,
BSON(DatabaseType::name(dbname)),
ShardingCatalogClient::kMajorityWriteConcern);
@@ -146,10 +146,10 @@ public:
}
// Invalidate the database so the next access will do a full reload
- Grid::get(txn)->catalogCache()->invalidate(dbname);
+ Grid::get(opCtx)->catalogCache()->invalidate(dbname);
catalogClient->logChange(
- txn, "dropDatabase", dbname, BSONObj(), ShardingCatalogClient::kMajorityWriteConcern);
+ opCtx, "dropDatabase", dbname, BSONObj(), ShardingCatalogClient::kMajorityWriteConcern);
result.append("dropped", dbname);
return true;
@@ -160,24 +160,25 @@ private:
* Sends the 'dropDatabase' command for the specified database to the specified shard. Throws
* DBException on failure.
*/
- static void _dropDatabaseFromShard(OperationContext* txn,
+ static void _dropDatabaseFromShard(OperationContext* opCtx,
const ShardId& shardId,
const std::string& dbName) {
- const auto dropDatabaseCommandBSON = [txn, &dbName] {
+ const auto dropDatabaseCommandBSON = [opCtx, &dbName] {
BSONObjBuilder builder;
builder.append("dropDatabase", 1);
- if (!txn->getWriteConcern().usedDefault) {
+ if (!opCtx->getWriteConcern().usedDefault) {
builder.append(WriteConcernOptions::kWriteConcernField,
- txn->getWriteConcern().toBSON());
+ opCtx->getWriteConcern().toBSON());
}
return builder.obj();
}();
- const auto shard = uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, shardId));
+ const auto shard =
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId));
auto cmdDropDatabaseResult = uassertStatusOK(shard->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
dbName,
dropDatabaseCommandBSON,
diff --git a/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp b/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
index 6f8338a0ae8..1db7ea7ef03 100644
--- a/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
+++ b/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
@@ -87,7 +87,7 @@ public:
return cmdObj.firstElement().str();
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname_unused,
BSONObj& cmdObj,
int options,
@@ -105,11 +105,11 @@ public:
return false;
}
- uassertStatusOK(Grid::get(txn)->catalogClient(txn)->enableSharding(txn, dbname));
+ uassertStatusOK(Grid::get(opCtx)->catalogClient(opCtx)->enableSharding(opCtx, dbname));
audit::logEnableSharding(Client::getCurrent(), dbname);
// Make sure to force update of any stale metadata
- Grid::get(txn)->catalogCache()->invalidate(dbname);
+ Grid::get(opCtx)->catalogCache()->invalidate(dbname);
return true;
}
diff --git a/src/mongo/s/commands/cluster_explain.cpp b/src/mongo/s/commands/cluster_explain.cpp
index a4268b26f6c..996893fc8be 100644
--- a/src/mongo/s/commands/cluster_explain.cpp
+++ b/src/mongo/s/commands/cluster_explain.cpp
@@ -214,7 +214,7 @@ const char* ClusterExplain::getStageNameForReadOp(
}
// static
-void ClusterExplain::buildPlannerInfo(OperationContext* txn,
+void ClusterExplain::buildPlannerInfo(OperationContext* opCtx,
const vector<Strategy::CommandResult>& shardResults,
const char* mongosStageName,
BSONObjBuilder* out) {
@@ -233,8 +233,8 @@ void ClusterExplain::buildPlannerInfo(OperationContext* txn,
singleShardBob.append("shardName", shardResults[i].shardTargetId.toString());
{
- const auto shard =
- uassertStatusOK(grid.shardRegistry()->getShard(txn, shardResults[i].shardTargetId));
+ const auto shard = uassertStatusOK(
+ grid.shardRegistry()->getShard(opCtx, shardResults[i].shardTargetId));
singleShardBob.append("connectionString", shard->getConnString().toString());
}
appendIfRoom(&singleShardBob, serverInfo, "serverInfo");
@@ -356,7 +356,7 @@ void ClusterExplain::buildExecStats(const vector<Strategy::CommandResult>& shard
}
// static
-Status ClusterExplain::buildExplainResult(OperationContext* txn,
+Status ClusterExplain::buildExplainResult(OperationContext* opCtx,
const vector<Strategy::CommandResult>& shardResults,
const char* mongosStageName,
long long millisElapsed,
@@ -367,7 +367,7 @@ Status ClusterExplain::buildExplainResult(OperationContext* txn,
return validateStatus;
}
- buildPlannerInfo(txn, shardResults, mongosStageName, out);
+ buildPlannerInfo(opCtx, shardResults, mongosStageName, out);
buildExecStats(shardResults, mongosStageName, millisElapsed, out);
return Status::OK();
diff --git a/src/mongo/s/commands/cluster_explain.h b/src/mongo/s/commands/cluster_explain.h
index 600f176337f..38332b15748 100644
--- a/src/mongo/s/commands/cluster_explain.h
+++ b/src/mongo/s/commands/cluster_explain.h
@@ -91,7 +91,7 @@ public:
*
* On success, the output is added to the BSONObj builder 'out'.
*/
- static Status buildExplainResult(OperationContext* txn,
+ static Status buildExplainResult(OperationContext* opCtx,
const std::vector<Strategy::CommandResult>& shardResults,
const char* mongosStageName,
long long millisElapsed,
@@ -120,7 +120,7 @@ private:
* The planner info will display 'mongosStageName' as the name of the execution stage
* performed by mongos after gathering results from the shards.
*/
- static void buildPlannerInfo(OperationContext* txn,
+ static void buildPlannerInfo(OperationContext* opCtx,
const std::vector<Strategy::CommandResult>& shardResults,
const char* mongosStageName,
BSONObjBuilder* out);
diff --git a/src/mongo/s/commands/cluster_explain_cmd.cpp b/src/mongo/s/commands/cluster_explain_cmd.cpp
index 3030589a080..4031a45c02a 100644
--- a/src/mongo/s/commands/cluster_explain_cmd.cpp
+++ b/src/mongo/s/commands/cluster_explain_cmd.cpp
@@ -87,7 +87,7 @@ public:
* the command that you are explaining. The auth check is performed recursively
* on the nested command.
*/
- virtual Status checkAuthForOperation(OperationContext* txn,
+ virtual Status checkAuthForOperation(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj) {
if (Object != cmdObj.firstElement().type()) {
@@ -103,10 +103,10 @@ public:
return Status(ErrorCodes::CommandNotFound, ss);
}
- return commToExplain->checkAuthForOperation(txn, dbname, explainObj);
+ return commToExplain->checkAuthForOperation(opCtx, dbname, explainObj);
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbName,
BSONObj& cmdObj,
int options,
@@ -140,7 +140,7 @@ public:
// Actually call the nested command's explain(...) method.
Status explainStatus =
- commToExplain->explain(txn, dbName, explainObj, verbosity, metadata, &result);
+ commToExplain->explain(opCtx, dbName, explainObj, verbosity, metadata, &result);
if (!explainStatus.isOK()) {
return appendCommandStatus(result, explainStatus);
}
diff --git a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
index d41fa0a5707..578968205af 100644
--- a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
@@ -82,7 +82,7 @@ public:
find_and_modify::addPrivilegesRequiredForFindAndModify(this, dbname, cmdObj, out);
}
- virtual Status explain(OperationContext* txn,
+ virtual Status explain(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -90,20 +90,21 @@ public:
BSONObjBuilder* out) const {
const NamespaceString nss = parseNsCollectionRequired(dbName, cmdObj);
- auto scopedDB = uassertStatusOK(ScopedShardDatabase::getExisting(txn, dbName));
+ auto scopedDB = uassertStatusOK(ScopedShardDatabase::getExisting(opCtx, dbName));
const auto conf = scopedDB.db();
shared_ptr<ChunkManager> chunkMgr;
shared_ptr<Shard> shard;
if (!conf->isSharded(nss.ns())) {
- auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, conf->getPrimaryId());
+ auto shardStatus =
+ Grid::get(opCtx)->shardRegistry()->getShard(opCtx, conf->getPrimaryId());
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
shard = shardStatus.getValue();
} else {
- chunkMgr = _getChunkManager(txn, conf, nss);
+ chunkMgr = _getChunkManager(opCtx, conf, nss);
const BSONObj query = cmdObj.getObjectField("query");
@@ -117,7 +118,7 @@ public:
return collationElementStatus;
}
- StatusWith<BSONObj> status = _getShardKey(txn, chunkMgr, query);
+ StatusWith<BSONObj> status = _getShardKey(opCtx, chunkMgr, query);
if (!status.isOK()) {
return status.getStatus();
}
@@ -125,7 +126,8 @@ public:
BSONObj shardKey = status.getValue();
auto chunk = chunkMgr->findIntersectingChunk(shardKey, collation);
- auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, chunk->getShardId());
+ auto shardStatus =
+ Grid::get(opCtx)->shardRegistry()->getShard(opCtx, chunk->getShardId());
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
@@ -141,7 +143,7 @@ public:
Timer timer;
BSONObjBuilder result;
- bool ok = _runCommand(txn, conf, chunkMgr, shard->getId(), nss, explainCmd.obj(), result);
+ bool ok = _runCommand(opCtx, conf, chunkMgr, shard->getId(), nss, explainCmd.obj(), result);
long long millisElapsed = timer.millis();
if (!ok) {
@@ -159,10 +161,10 @@ public:
shardResults.push_back(cmdResult);
return ClusterExplain::buildExplainResult(
- txn, shardResults, ClusterExplain::kSingleShard, millisElapsed, out);
+ opCtx, shardResults, ClusterExplain::kSingleShard, millisElapsed, out);
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbName,
BSONObj& cmdObj,
int options,
@@ -172,14 +174,14 @@ public:
// findAndModify should only be creating database if upsert is true, but this would require
// that the parsing be pulled into this function.
- auto scopedDb = uassertStatusOK(ScopedShardDatabase::getOrCreate(txn, dbName));
+ auto scopedDb = uassertStatusOK(ScopedShardDatabase::getOrCreate(opCtx, dbName));
const auto conf = scopedDb.db();
if (!conf->isSharded(nss.ns())) {
- return _runCommand(txn, conf, nullptr, conf->getPrimaryId(), nss, cmdObj, result);
+ return _runCommand(opCtx, conf, nullptr, conf->getPrimaryId(), nss, cmdObj, result);
}
- shared_ptr<ChunkManager> chunkMgr = _getChunkManager(txn, conf, nss);
+ shared_ptr<ChunkManager> chunkMgr = _getChunkManager(opCtx, conf, nss);
const BSONObj query = cmdObj.getObjectField("query");
@@ -193,7 +195,7 @@ public:
return appendCommandStatus(result, collationElementStatus);
}
- StatusWith<BSONObj> status = _getShardKey(txn, chunkMgr, query);
+ StatusWith<BSONObj> status = _getShardKey(opCtx, chunkMgr, query);
if (!status.isOK()) {
// Bad query
return appendCommandStatus(result, status.getStatus());
@@ -202,31 +204,32 @@ public:
BSONObj shardKey = status.getValue();
auto chunk = chunkMgr->findIntersectingChunk(shardKey, collation);
- const bool ok = _runCommand(txn, conf, chunkMgr, chunk->getShardId(), nss, cmdObj, result);
+ const bool ok =
+ _runCommand(opCtx, conf, chunkMgr, chunk->getShardId(), nss, cmdObj, result);
if (ok) {
updateChunkWriteStatsAndSplitIfNeeded(
- txn, chunkMgr.get(), chunk.get(), cmdObj.getObjectField("update").objsize());
+ opCtx, chunkMgr.get(), chunk.get(), cmdObj.getObjectField("update").objsize());
}
return ok;
}
private:
- shared_ptr<ChunkManager> _getChunkManager(OperationContext* txn,
+ shared_ptr<ChunkManager> _getChunkManager(OperationContext* opCtx,
DBConfig* conf,
const NamespaceString& nss) const {
- shared_ptr<ChunkManager> chunkMgr = conf->getChunkManager(txn, nss.ns());
+ shared_ptr<ChunkManager> chunkMgr = conf->getChunkManager(opCtx, nss.ns());
massert(13002, "shard internal error chunk manager should never be null", chunkMgr);
return chunkMgr;
}
- StatusWith<BSONObj> _getShardKey(OperationContext* txn,
+ StatusWith<BSONObj> _getShardKey(OperationContext* opCtx,
shared_ptr<ChunkManager> chunkMgr,
const BSONObj& query) const {
// Verify that the query has an equality predicate using the shard key
StatusWith<BSONObj> status =
- chunkMgr->getShardKeyPattern().extractShardKeyFromQuery(txn, query);
+ chunkMgr->getShardKeyPattern().extractShardKeyFromQuery(opCtx, query);
if (!status.isOK()) {
return status;
@@ -242,7 +245,7 @@ private:
return shardKey;
}
- bool _runCommand(OperationContext* txn,
+ bool _runCommand(OperationContext* opCtx,
DBConfig* conf,
shared_ptr<ChunkManager> chunkManager,
const ShardId& shardId,
@@ -251,7 +254,8 @@ private:
BSONObjBuilder& result) const {
BSONObj res;
- const auto shard = uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, shardId));
+ const auto shard =
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId));
ShardConnection conn(shard->getConnString(), nss.ns(), chunkManager);
bool ok = conn->runCommand(conf->name(), cmdObj, res);
diff --git a/src/mongo/s/commands/cluster_find_cmd.cpp b/src/mongo/s/commands/cluster_find_cmd.cpp
index 52edfe5e20e..d308c3a53fd 100644
--- a/src/mongo/s/commands/cluster_find_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_cmd.cpp
@@ -101,7 +101,7 @@ public:
return AuthorizationSession::get(client)->checkAuthForFind(nss, hasTerm);
}
- Status explain(OperationContext* txn,
+ Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -116,7 +116,7 @@ public:
}
auto result = Strategy::explainFind(
- txn, cmdObj, *qr.getValue(), verbosity, serverSelectionMetadata, out);
+ opCtx, cmdObj, *qr.getValue(), verbosity, serverSelectionMetadata, out);
if (result == ErrorCodes::CommandOnShardedViewNotSupportedOnMongod) {
auto resolvedView = ResolvedView::fromBSON(out->asTempObj());
@@ -136,8 +136,8 @@ public:
ClusterAggregate::Namespaces nsStruct;
nsStruct.requestedNss = std::move(nss);
nsStruct.executionNss = std::move(resolvedView.getNamespace());
- auto status =
- ClusterAggregate::runAggregate(txn, nsStruct, aggCmd.getValue(), queryOptions, out);
+ auto status = ClusterAggregate::runAggregate(
+ opCtx, nsStruct, aggCmd.getValue(), queryOptions, out);
appendCommandStatus(*out, status);
return status;
}
@@ -145,7 +145,7 @@ public:
return result;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -163,7 +163,7 @@ public:
}
auto cq =
- CanonicalQuery::canonicalize(txn, std::move(qr.getValue()), ExtensionsCallbackNoop());
+ CanonicalQuery::canonicalize(opCtx, std::move(qr.getValue()), ExtensionsCallbackNoop());
if (!cq.isOK()) {
return appendCommandStatus(result, cq.getStatus());
}
@@ -181,7 +181,7 @@ public:
std::vector<BSONObj> batch;
BSONObj viewDefinition;
auto cursorId = ClusterFind::runQuery(
- txn, *cq.getValue(), readPref.getValue(), &batch, &viewDefinition);
+ opCtx, *cq.getValue(), readPref.getValue(), &batch, &viewDefinition);
if (!cursorId.isOK()) {
if (cursorId.getStatus() == ErrorCodes::CommandOnShardedViewNotSupportedOnMongod) {
auto aggCmdOnView = cq.getValue()->getQueryRequest().asAggregationCommand();
@@ -203,7 +203,7 @@ public:
nsStruct.requestedNss = std::move(nss);
nsStruct.executionNss = std::move(resolvedView.getNamespace());
auto status = ClusterAggregate::runAggregate(
- txn, nsStruct, aggCmd.getValue(), options, &result);
+ opCtx, nsStruct, aggCmd.getValue(), options, &result);
appendCommandStatus(result, status);
return status.isOK();
}
diff --git a/src/mongo/s/commands/cluster_flush_router_config_cmd.cpp b/src/mongo/s/commands/cluster_flush_router_config_cmd.cpp
index 4f489ab28c1..35150ac3aca 100644
--- a/src/mongo/s/commands/cluster_flush_router_config_cmd.cpp
+++ b/src/mongo/s/commands/cluster_flush_router_config_cmd.cpp
@@ -64,13 +64,13 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
std::string& errmsg,
BSONObjBuilder& result) {
- Grid::get(txn)->catalogCache()->invalidateAll();
+ Grid::get(opCtx)->catalogCache()->invalidateAll();
result.appendBool("flushed", true);
return true;
diff --git a/src/mongo/s/commands/cluster_fsync_cmd.cpp b/src/mongo/s/commands/cluster_fsync_cmd.cpp
index 6de6d08021f..a75c0b3629e 100644
--- a/src/mongo/s/commands/cluster_fsync_cmd.cpp
+++ b/src/mongo/s/commands/cluster_fsync_cmd.cpp
@@ -67,7 +67,7 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -87,14 +87,14 @@ public:
grid.shardRegistry()->getAllShardIds(&shardIds);
for (const ShardId& shardId : shardIds) {
- auto shardStatus = grid.shardRegistry()->getShard(txn, shardId);
+ auto shardStatus = grid.shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
continue;
}
const auto s = shardStatus.getValue();
auto response = uassertStatusOK(s->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"admin",
BSON("fsync" << 1),
diff --git a/src/mongo/s/commands/cluster_ftdc_commands.cpp b/src/mongo/s/commands/cluster_ftdc_commands.cpp
index 23903e92984..f8d55e352ef 100644
--- a/src/mongo/s/commands/cluster_ftdc_commands.cpp
+++ b/src/mongo/s/commands/cluster_ftdc_commands.cpp
@@ -70,7 +70,7 @@ public:
return Status::OK();
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& db,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/cluster_get_last_error_cmd.cpp b/src/mongo/s/commands/cluster_get_last_error_cmd.cpp
index 2b4e386b312..7a81082597f 100644
--- a/src/mongo/s/commands/cluster_get_last_error_cmd.cpp
+++ b/src/mongo/s/commands/cluster_get_last_error_cmd.cpp
@@ -81,7 +81,7 @@ BSONObj buildGLECmdWithOpTime(const BSONObj& gleOptions,
* Returns OK with the LegacyWCResponses containing only write concern error information
* Returns !OK if there was an error getting a GLE response
*/
-Status enforceLegacyWriteConcern(OperationContext* txn,
+Status enforceLegacyWriteConcern(OperationContext* opCtx,
StringData dbName,
const BSONObj& options,
const HostOpTimeMap& hostOpTimes,
@@ -98,7 +98,7 @@ Status enforceLegacyWriteConcern(OperationContext* txn,
const repl::OpTime& opTime = hot.opTime;
const OID& electionId = hot.electionId;
- auto swShard = Grid::get(txn)->shardRegistry()->getShard(txn, shardConnStr.toString());
+ auto swShard = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardConnStr.toString());
if (!swShard.isOK()) {
return swShard.getStatus();
}
@@ -114,9 +114,12 @@ Status enforceLegacyWriteConcern(OperationContext* txn,
// Send the requests and wait to receive all the responses.
const ReadPreferenceSetting readPref(ReadPreference::PrimaryOnly, TagSet());
- AsyncRequestsSender ars(
- txn, Grid::get(txn)->getExecutorPool()->getArbitraryExecutor(), dbName, requests, readPref);
- auto responses = ars.waitForResponses(txn);
+ AsyncRequestsSender ars(opCtx,
+ Grid::get(opCtx)->getExecutorPool()->getArbitraryExecutor(),
+ dbName,
+ requests,
+ readPref);
+ auto responses = ars.waitForResponses(opCtx);
// Parse the responses.
@@ -201,7 +204,7 @@ public:
// No auth required for getlasterror
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -239,7 +242,7 @@ public:
const HostOpTimeMap hostOpTimes(ClusterLastErrorInfo::get(cc()).getPrevHostOpTimes());
std::vector<LegacyWCResponse> wcResponses;
- auto status = enforceLegacyWriteConcern(txn, dbname, cmdObj, hostOpTimes, &wcResponses);
+ auto status = enforceLegacyWriteConcern(opCtx, dbname, cmdObj, hostOpTimes, &wcResponses);
// Don't forget about our last hosts, reset the client info
ClusterLastErrorInfo::get(cc()).disableForCommand();
diff --git a/src/mongo/s/commands/cluster_get_prev_error_cmd.cpp b/src/mongo/s/commands/cluster_get_prev_error_cmd.cpp
index 7eebe3bf5df..57ffc184cc4 100644
--- a/src/mongo/s/commands/cluster_get_prev_error_cmd.cpp
+++ b/src/mongo/s/commands/cluster_get_prev_error_cmd.cpp
@@ -61,7 +61,7 @@ public:
// No auth required
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/cluster_get_shard_map_cmd.cpp b/src/mongo/s/commands/cluster_get_shard_map_cmd.cpp
index 48bbdb2a9d4..5e6be95bebe 100644
--- a/src/mongo/s/commands/cluster_get_shard_map_cmd.cpp
+++ b/src/mongo/s/commands/cluster_get_shard_map_cmd.cpp
@@ -67,7 +67,7 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp b/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp
index 3ab79ef5364..00e104c1e45 100644
--- a/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp
+++ b/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp
@@ -78,7 +78,7 @@ public:
return parseNsFullyQualified(dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -86,10 +86,10 @@ public:
BSONObjBuilder& result) override {
const NamespaceString nss(parseNs(dbname, cmdObj));
- auto scopedDb = uassertStatusOK(ScopedShardDatabase::getExisting(txn, nss.db()));
+ auto scopedDb = uassertStatusOK(ScopedShardDatabase::getExisting(opCtx, nss.db()));
auto config = scopedDb.db();
- auto cm = config->getChunkManagerIfExists(txn, nss.ns());
+ auto cm = config->getChunkManagerIfExists(opCtx, nss.ns());
uassert(ErrorCodes::NamespaceNotSharded, "ns [" + nss.ns() + " is not sharded.", cm);
for (const auto& cmEntry : cm->getChunkMap()) {
diff --git a/src/mongo/s/commands/cluster_getmore_cmd.cpp b/src/mongo/s/commands/cluster_getmore_cmd.cpp
index dff5c9c7e4a..e0af7c39663 100644
--- a/src/mongo/s/commands/cluster_getmore_cmd.cpp
+++ b/src/mongo/s/commands/cluster_getmore_cmd.cpp
@@ -91,7 +91,7 @@ public:
request.nss, request.cursorid, request.term.is_initialized());
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -106,7 +106,7 @@ public:
}
const GetMoreRequest& request = parseStatus.getValue();
- auto response = ClusterFind::runGetMore(txn, request);
+ auto response = ClusterFind::runGetMore(opCtx, request);
if (!response.isOK()) {
return appendCommandStatus(result, response.getStatus());
}
diff --git a/src/mongo/s/commands/cluster_index_filter_cmd.cpp b/src/mongo/s/commands/cluster_index_filter_cmd.cpp
index 52cdebea5d5..35612ecd08e 100644
--- a/src/mongo/s/commands/cluster_index_filter_cmd.cpp
+++ b/src/mongo/s/commands/cluster_index_filter_cmd.cpp
@@ -91,7 +91,7 @@ public:
}
// Cluster plan cache command entry point.
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -108,7 +108,7 @@ public:
vector<Strategy::CommandResult> results;
const BSONObj query;
Strategy::commandOp(
- txn, dbname, cmdObj, options, nss.ns(), query, CollationSpec::kSimpleSpec, &results);
+ opCtx, dbname, cmdObj, options, nss.ns(), query, CollationSpec::kSimpleSpec, &results);
// Set value of first shard result's "ok" field.
bool clusterCmdResult = true;
diff --git a/src/mongo/s/commands/cluster_is_db_grid_cmd.cpp b/src/mongo/s/commands/cluster_is_db_grid_cmd.cpp
index 5d97540a876..6c9705ff35c 100644
--- a/src/mongo/s/commands/cluster_is_db_grid_cmd.cpp
+++ b/src/mongo/s/commands/cluster_is_db_grid_cmd.cpp
@@ -52,7 +52,7 @@ public:
// No auth required
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/cluster_is_master_cmd.cpp b/src/mongo/s/commands/cluster_is_master_cmd.cpp
index 566660ea92d..761aca9bba1 100644
--- a/src/mongo/s/commands/cluster_is_master_cmd.cpp
+++ b/src/mongo/s/commands/cluster_is_master_cmd.cpp
@@ -67,14 +67,14 @@ public:
// No auth required
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
std::string& errmsg,
BSONObjBuilder& result) {
- auto& clientMetadataIsMasterState = ClientMetadataIsMasterState::get(txn->getClient());
+ auto& clientMetadataIsMasterState = ClientMetadataIsMasterState::get(opCtx->getClient());
bool seenIsMaster = clientMetadataIsMasterState.hasSeenIsMaster();
if (!seenIsMaster) {
clientMetadataIsMasterState.setSeenIsMaster();
@@ -97,10 +97,10 @@ public:
invariant(swParseClientMetadata.getValue());
- swParseClientMetadata.getValue().get().logClientMetadata(txn->getClient());
+ swParseClientMetadata.getValue().get().logClientMetadata(opCtx->getClient());
clientMetadataIsMasterState.setClientMetadata(
- txn->getClient(), std::move(swParseClientMetadata.getValue()));
+ opCtx->getClient(), std::move(swParseClientMetadata.getValue()));
}
result.appendBool("ismaster", true);
@@ -119,9 +119,9 @@ public:
"automationServiceDescriptor",
static_cast<ServerParameter*>(nullptr));
if (parameter)
- parameter->append(txn, result, "automationServiceDescriptor");
+ parameter->append(opCtx, result, "automationServiceDescriptor");
- MessageCompressorManager::forSession(txn->getClient()->session())
+ MessageCompressorManager::forSession(opCtx->getClient()->session())
.serverNegotiate(cmdObj, &result);
return true;
diff --git a/src/mongo/s/commands/cluster_kill_op.cpp b/src/mongo/s/commands/cluster_kill_op.cpp
index bfbc556b36b..d636d6922b3 100644
--- a/src/mongo/s/commands/cluster_kill_op.cpp
+++ b/src/mongo/s/commands/cluster_kill_op.cpp
@@ -76,7 +76,7 @@ public:
return isAuthorized ? Status::OK() : Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& db,
BSONObj& cmdObj,
int options,
@@ -103,7 +103,7 @@ public:
log() << "want to kill op: " << redact(opToKill);
// Will throw if shard id is not found
- auto shardStatus = grid.shardRegistry()->getShard(txn, shardIdent);
+ auto shardStatus = grid.shardRegistry()->getShard(opCtx, shardIdent);
if (!shardStatus.isOK()) {
return appendCommandStatus(result, shardStatus.getStatus());
}
diff --git a/src/mongo/s/commands/cluster_killcursors_cmd.cpp b/src/mongo/s/commands/cluster_killcursors_cmd.cpp
index 63235315d4c..e04dad6fc42 100644
--- a/src/mongo/s/commands/cluster_killcursors_cmd.cpp
+++ b/src/mongo/s/commands/cluster_killcursors_cmd.cpp
@@ -40,7 +40,9 @@ public:
ClusterKillCursorsCmd() = default;
private:
- Status _killCursor(OperationContext* txn, const NamespaceString& nss, CursorId cursorId) final {
+ Status _killCursor(OperationContext* opCtx,
+ const NamespaceString& nss,
+ CursorId cursorId) final {
return grid.getCursorManager()->killCursor(nss, cursorId);
}
} clusterKillCursorsCmd;
diff --git a/src/mongo/s/commands/cluster_list_databases_cmd.cpp b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
index 82d519b52bb..6b2d8b9400d 100644
--- a/src/mongo/s/commands/cluster_list_databases_cmd.cpp
+++ b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
@@ -83,7 +83,7 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname_unused,
BSONObj& cmdObj,
int options,
@@ -98,14 +98,14 @@ public:
grid.shardRegistry()->getAllShardIds(&shardIds);
for (const ShardId& shardId : shardIds) {
- const auto shardStatus = grid.shardRegistry()->getShard(txn, shardId);
+ const auto shardStatus = grid.shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
continue;
}
const auto s = shardStatus.getValue();
auto response = uassertStatusOK(s->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
"admin",
cmdObj,
@@ -166,9 +166,9 @@ public:
}
// Get information for config and admin dbs from the config servers.
- auto catalogClient = grid.catalogClient(txn);
+ auto catalogClient = grid.catalogClient(opCtx);
auto appendStatus =
- catalogClient->appendInfoForConfigServerDatabases(txn, cmdObj, &dbListBuilder);
+ catalogClient->appendInfoForConfigServerDatabases(opCtx, cmdObj, &dbListBuilder);
if (!appendStatus.isOK()) {
return Command::appendCommandStatus(result, appendStatus);
}
diff --git a/src/mongo/s/commands/cluster_list_shards_cmd.cpp b/src/mongo/s/commands/cluster_list_shards_cmd.cpp
index 8d3b19ad8c8..af90c1bff04 100644
--- a/src/mongo/s/commands/cluster_list_shards_cmd.cpp
+++ b/src/mongo/s/commands/cluster_list_shards_cmd.cpp
@@ -68,14 +68,14 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
std::string& errmsg,
BSONObjBuilder& result) {
- auto shardsStatus = grid.catalogClient(txn)->getAllShards(
- txn, repl::ReadConcernLevel::kMajorityReadConcern);
+ auto shardsStatus = grid.catalogClient(opCtx)->getAllShards(
+ opCtx, repl::ReadConcernLevel::kMajorityReadConcern);
if (!shardsStatus.isOK()) {
return appendCommandStatus(result, shardsStatus.getStatus());
}
diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
index 6d4a4155365..088b8d6d4d1 100644
--- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
@@ -182,7 +182,7 @@ public:
mr::addPrivilegesRequiredForMapReduce(this, dbname, cmdObj, out);
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -232,7 +232,7 @@ public:
}
// Ensure the input database exists
- auto status = Grid::get(txn)->catalogCache()->getDatabase(txn, dbname);
+ auto status = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbname);
if (!status.isOK()) {
return appendCommandStatus(result, status.getStatus());
}
@@ -242,7 +242,7 @@ public:
shared_ptr<DBConfig> confOut;
if (customOutDB) {
// Create the output database implicitly, since we have a custom output requested
- auto scopedDb = uassertStatusOK(ScopedShardDatabase::getOrCreate(txn, outDB));
+ auto scopedDb = uassertStatusOK(ScopedShardDatabase::getOrCreate(opCtx, outDB));
confOut = scopedDb.getSharedDbReference();
} else {
confOut = confIn;
@@ -274,14 +274,14 @@ public:
maxChunkSizeBytes = cmdObj["maxChunkSizeBytes"].numberLong();
if (maxChunkSizeBytes == 0) {
maxChunkSizeBytes =
- Grid::get(txn)->getBalancerConfiguration()->getMaxChunkSizeBytes();
+ Grid::get(opCtx)->getBalancerConfiguration()->getMaxChunkSizeBytes();
}
// maxChunkSizeBytes is sent as int BSON field
invariant(maxChunkSizeBytes < std::numeric_limits<int>::max());
}
- const auto shardRegistry = Grid::get(txn)->shardRegistry();
+ const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
// modify command to run on shards with output to tmp collection
string badShardedField;
@@ -292,7 +292,7 @@ public:
LOG(1) << "simple MR, just passthrough";
const auto shard =
- uassertStatusOK(shardRegistry->getShard(txn, confIn->getPrimaryId()));
+ uassertStatusOK(shardRegistry->getShard(opCtx, confIn->getPrimaryId()));
ShardConnection conn(shard->getConnString(), "");
@@ -338,7 +338,7 @@ public:
try {
Strategy::commandOp(
- txn, dbname, shardedCommand, 0, nss.ns(), q, collation, &mrCommandResults);
+ opCtx, dbname, shardedCommand, 0, nss.ns(), q, collation, &mrCommandResults);
} catch (DBException& e) {
e.addContext(str::stream() << "could not run map command on all shards for ns "
<< nss.ns()
@@ -352,7 +352,7 @@ public:
string server;
{
const auto shard =
- uassertStatusOK(shardRegistry->getShard(txn, mrResult.shardTargetId));
+ uassertStatusOK(shardRegistry->getShard(opCtx, mrResult.shardTargetId));
server = shard->getConnString().toString();
}
servers.insert(server);
@@ -413,7 +413,7 @@ public:
finalCmd.append("inputDB", dbname);
finalCmd.append("shardedOutputCollection", shardResultCollection);
finalCmd.append("shards", shardResultsB.done());
- finalCmd.append("writeConcern", txn->getWriteConcern().toBSON());
+ finalCmd.append("writeConcern", opCtx->getWriteConcern().toBSON());
BSONObj shardCounts = shardCountsB.done();
finalCmd.append("shardCounts", shardCounts);
@@ -446,7 +446,7 @@ public:
if (!shardedOutput) {
const auto shard =
- uassertStatusOK(shardRegistry->getShard(txn, confOut->getPrimaryId()));
+ uassertStatusOK(shardRegistry->getShard(opCtx, confOut->getPrimaryId()));
LOG(1) << "MR with single shard output, NS=" << outputCollNss.ns()
<< " primary=" << shard->toString();
@@ -472,20 +472,20 @@ public:
// Create the sharded collection if needed
if (!confOut->isSharded(outputCollNss.ns())) {
// Enable sharding on the output db
- Status status = Grid::get(txn)->catalogClient(txn)->enableSharding(
- txn, outputCollNss.db().toString());
+ Status status = Grid::get(opCtx)->catalogClient(opCtx)->enableSharding(
+ opCtx, outputCollNss.db().toString());
// If the database has sharding already enabled, we can ignore the error
if (status.isOK()) {
// Invalidate the output database so it gets reloaded on the next fetch attempt
- Grid::get(txn)->catalogCache()->invalidate(outputCollNss.db());
+ Grid::get(opCtx)->catalogCache()->invalidate(outputCollNss.db());
} else if (status != ErrorCodes::AlreadyInitialized) {
uassertStatusOK(status);
}
confOut.reset();
- confOut = uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(
- txn, outputCollNss.db().toString()));
+ confOut = uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(
+ opCtx, outputCollNss.db().toString()));
// Shard collection according to split points
vector<BSONObj> sortedSplitPts;
@@ -523,24 +523,24 @@ public:
BSONObj defaultCollation;
uassertStatusOK(
- Grid::get(txn)->catalogClient(txn)->shardCollection(txn,
- outputCollNss.ns(),
- sortKeyPattern,
- defaultCollation,
- true,
- sortedSplitPts,
- outShardIds));
+ Grid::get(opCtx)->catalogClient(opCtx)->shardCollection(opCtx,
+ outputCollNss.ns(),
+ sortKeyPattern,
+ defaultCollation,
+ true,
+ sortedSplitPts,
+ outShardIds));
// Make sure the cached metadata for the collection knows that we are now sharded
- confOut->getChunkManager(txn, outputCollNss.ns(), true /* reload */);
+ confOut->getChunkManager(opCtx, outputCollNss.ns(), true /* reload */);
}
auto chunkSizes = SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<int>();
{
// Take distributed lock to prevent split / migration.
auto scopedDistLock =
- Grid::get(txn)->catalogClient(txn)->getDistLockManager()->lock(
- txn, outputCollNss.ns(), "mr-post-process", kNoDistLockTimeout);
+ Grid::get(opCtx)->catalogClient(opCtx)->getDistLockManager()->lock(
+ opCtx, outputCollNss.ns(), "mr-post-process", kNoDistLockTimeout);
if (!scopedDistLock.isOK()) {
return appendCommandStatus(result, scopedDistLock.getStatus());
}
@@ -550,7 +550,7 @@ public:
try {
const BSONObj query;
- Strategy::commandOp(txn,
+ Strategy::commandOp(opCtx,
outDB,
finalCmdObj,
0,
@@ -570,8 +570,9 @@ public:
for (const auto& mrResult : mrCommandResults) {
string server;
{
- const auto shard = uassertStatusOK(
- Grid::get(txn)->shardRegistry()->getShard(txn, mrResult.shardTargetId));
+ const auto shard =
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(
+ opCtx, mrResult.shardTargetId));
server = shard->getConnString().toString();
}
singleResult = mrResult.result;
@@ -609,7 +610,8 @@ public:
}
// Do the splitting round
- shared_ptr<ChunkManager> cm = confOut->getChunkManagerIfExists(txn, outputCollNss.ns());
+ shared_ptr<ChunkManager> cm =
+ confOut->getChunkManagerIfExists(opCtx, outputCollNss.ns());
uassert(34359,
str::stream() << "Failed to write mapreduce output to " << outputCollNss.ns()
<< "; expected that collection to be sharded, but it was not",
@@ -626,7 +628,7 @@ public:
warning() << "Mongod reported " << size << " bytes inserted for key " << key
<< " but can't find chunk";
} else {
- updateChunkWriteStatsAndSplitIfNeeded(txn, cm.get(), c.get(), size);
+ updateChunkWriteStatsAndSplitIfNeeded(opCtx, cm.get(), c.get(), size);
}
}
}
diff --git a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
index 2aaeeaeabb0..6b247823381 100644
--- a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
+++ b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
@@ -99,7 +99,7 @@ public:
static BSONField<string> configField;
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -107,7 +107,7 @@ public:
BSONObjBuilder& result) {
const NamespaceString nss(parseNs(dbname, cmdObj));
- auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(txn, nss));
+ auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(opCtx, nss));
vector<BSONObj> bounds;
if (!FieldParser::extract(cmdObj, boundsField, &bounds, &errmsg)) {
@@ -158,7 +158,7 @@ public:
remoteCmdObjB.append(cmdObj[ClusterMergeChunksCommand::boundsField()]);
remoteCmdObjB.append(
ClusterMergeChunksCommand::configField(),
- Grid::get(txn)->shardRegistry()->getConfigServerConnectionString().toString());
+ Grid::get(opCtx)->shardRegistry()->getConfigServerConnectionString().toString());
remoteCmdObjB.append(ClusterMergeChunksCommand::shardNameField(),
firstChunk->getShardId().toString());
@@ -167,7 +167,7 @@ public:
// Throws, but handled at level above. Don't want to rewrap to preserve exception
// formatting.
const auto shardStatus =
- Grid::get(txn)->shardRegistry()->getShard(txn, firstChunk->getShardId());
+ Grid::get(opCtx)->shardRegistry()->getShard(opCtx, firstChunk->getShardId());
if (!shardStatus.isOK()) {
return appendCommandStatus(
result,
diff --git a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
index af94cb7396f..c3cb18ceb15 100644
--- a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
@@ -96,7 +96,7 @@ public:
return parseNsFullyQualified(dbname, cmdObj);
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -106,7 +106,7 @@ public:
const NamespaceString nss(parseNs(dbname, cmdObj));
- auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(txn, nss));
+ auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(opCtx, nss));
const auto toElt = cmdObj["to"];
uassert(ErrorCodes::TypeMismatch,
@@ -118,7 +118,7 @@ public:
return false;
}
- const auto toStatus = Grid::get(txn)->shardRegistry()->getShard(txn, toString);
+ const auto toStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, toString);
if (!toStatus.isOK()) {
string msg(str::stream() << "Could not move chunk in '" << nss.ns() << "' to shard '"
<< toString
@@ -132,7 +132,8 @@ public:
// so far, chunk size serves test purposes; it may or may not become a supported parameter
long long maxChunkSizeBytes = cmdObj["maxChunkSizeBytes"].numberLong();
if (maxChunkSizeBytes == 0) {
- maxChunkSizeBytes = Grid::get(txn)->getBalancerConfiguration()->getMaxChunkSizeBytes();
+ maxChunkSizeBytes =
+ Grid::get(opCtx)->getBalancerConfiguration()->getMaxChunkSizeBytes();
}
BSONObj find = cmdObj.getObjectField("find");
@@ -151,7 +152,7 @@ public:
if (!find.isEmpty()) {
// find
BSONObj shardKey =
- uassertStatusOK(cm->getShardKeyPattern().extractShardKeyFromQuery(txn, find));
+ uassertStatusOK(cm->getShardKeyPattern().extractShardKeyFromQuery(opCtx, find));
if (shardKey.isEmpty()) {
errmsg = str::stream() << "no shard key found in chunk query " << find;
return false;
@@ -191,7 +192,7 @@ public:
chunkType.setShard(chunk->getShardId());
chunkType.setVersion(cm->getVersion());
- uassertStatusOK(configsvr_client::moveChunk(txn,
+ uassertStatusOK(configsvr_client::moveChunk(opCtx,
chunkType,
to->getId(),
maxChunkSizeBytes,
@@ -200,7 +201,7 @@ public:
// Proactively refresh the chunk manager. Not strictly necessary, but this way it's
// immediately up-to-date the next time it's used.
- scopedCM.db()->getChunkManagerIfExists(txn, nss.ns(), true);
+ scopedCM.db()->getChunkManagerIfExists(opCtx, nss.ns(), true);
result.append("millis", t.millis());
return true;
diff --git a/src/mongo/s/commands/cluster_move_primary_cmd.cpp b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
index 652c204c60e..dc192f1a6a6 100644
--- a/src/mongo/s/commands/cluster_move_primary_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
@@ -101,7 +101,7 @@ public:
return nsElt.str();
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname_unused,
BSONObj& cmdObj,
int options,
@@ -120,14 +120,14 @@ public:
return false;
}
- auto const catalogClient = Grid::get(txn)->catalogClient(txn);
- auto const catalogCache = Grid::get(txn)->catalogCache();
- auto const shardRegistry = Grid::get(txn)->shardRegistry();
+ auto const catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
+ auto const catalogCache = Grid::get(opCtx)->catalogCache();
+ auto const shardRegistry = Grid::get(opCtx)->shardRegistry();
// Flush all cached information. This can't be perfect, but it's better than nothing.
catalogCache->invalidate(dbname);
- auto config = uassertStatusOK(catalogCache->getDatabase(txn, dbname));
+ auto config = uassertStatusOK(catalogCache->getDatabase(opCtx, dbname));
const auto toElt = cmdObj["to"];
uassert(ErrorCodes::TypeMismatch,
@@ -140,10 +140,10 @@ public:
}
const auto fromShard =
- uassertStatusOK(shardRegistry->getShard(txn, config->getPrimaryId()));
+ uassertStatusOK(shardRegistry->getShard(opCtx, config->getPrimaryId()));
const auto toShard = [&]() {
- auto toShardStatus = shardRegistry->getShard(txn, to);
+ auto toShardStatus = shardRegistry->getShard(opCtx, to);
if (!toShardStatus.isOK()) {
const std::string msg(
str::stream() << "Could not move database '" << dbname << "' to shard '" << to
@@ -165,13 +165,13 @@ public:
const std::string whyMessage(str::stream() << "Moving primary shard of " << dbname);
auto scopedDistLock = uassertStatusOK(catalogClient->getDistLockManager()->lock(
- txn, dbname + "-movePrimary", whyMessage, DistLockManager::kDefaultLockTimeout));
+ opCtx, dbname + "-movePrimary", whyMessage, DistLockManager::kDefaultLockTimeout));
- const auto shardedColls = getAllShardedCollectionsForDb(txn, dbname);
+ const auto shardedColls = getAllShardedCollectionsForDb(opCtx, dbname);
// Record start in changelog
catalogClient->logChange(
- txn,
+ opCtx,
"movePrimary.start",
dbname,
_buildMoveLogEntry(dbname, fromShard->toString(), toShard->toString(), shardedColls),
@@ -197,7 +197,7 @@ public:
<< bypassDocumentValidationCommandOption()
<< true
<< "writeConcern"
- << txn->getWriteConcern().toBSON()),
+ << opCtx->getWriteConcern().toBSON()),
cloneRes);
toconn.done();
@@ -215,10 +215,10 @@ public:
// Update the new primary in the config server metadata
{
- auto dbt = uassertStatusOK(catalogClient->getDatabase(txn, dbname)).value;
+ auto dbt = uassertStatusOK(catalogClient->getDatabase(opCtx, dbname)).value;
dbt.setPrimary(toShard->getId());
- uassertStatusOK(catalogClient->updateDatabase(txn, dbname, dbt));
+ uassertStatusOK(catalogClient->updateDatabase(opCtx, dbname, dbt));
}
// Ensure the next attempt to retrieve the database or any of its collections will do a full
@@ -236,7 +236,7 @@ public:
try {
BSONObj dropDBInfo;
- fromconn->dropDatabase(dbname.c_str(), txn->getWriteConcern(), &dropDBInfo);
+ fromconn->dropDatabase(dbname.c_str(), opCtx->getWriteConcern(), &dropDBInfo);
if (!hasWCError) {
if (auto wcErrorElem = dropDBInfo["writeConcernError"]) {
appendWriteConcernErrorToCmdResponse(
@@ -269,7 +269,7 @@ public:
<< oldPrimary;
BSONObj dropCollInfo;
fromconn->dropCollection(
- el.String(), txn->getWriteConcern(), &dropCollInfo);
+ el.String(), opCtx->getWriteConcern(), &dropCollInfo);
if (!hasWCError) {
if (auto wcErrorElem = dropCollInfo["writeConcernError"]) {
appendWriteConcernErrorToCmdResponse(
@@ -296,7 +296,7 @@ public:
// Record finish in changelog
catalogClient->logChange(
- txn,
+ opCtx,
"movePrimary",
dbname,
_buildMoveLogEntry(dbname, oldPrimary, toShard->toString(), shardedColls),
diff --git a/src/mongo/s/commands/cluster_netstat_cmd.cpp b/src/mongo/s/commands/cluster_netstat_cmd.cpp
index fd5a2b2e87d..0d50223c112 100644
--- a/src/mongo/s/commands/cluster_netstat_cmd.cpp
+++ b/src/mongo/s/commands/cluster_netstat_cmd.cpp
@@ -65,7 +65,7 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/cluster_pipeline_cmd.cpp b/src/mongo/s/commands/cluster_pipeline_cmd.cpp
index 46ab48b39a2..e533ae05392 100644
--- a/src/mongo/s/commands/cluster_pipeline_cmd.cpp
+++ b/src/mongo/s/commands/cluster_pipeline_cmd.cpp
@@ -72,7 +72,7 @@ public:
return AuthorizationSession::get(client)->checkAuthForAggregate(nss, cmdObj);
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -83,7 +83,7 @@ public:
ClusterAggregate::Namespaces nsStruct;
nsStruct.requestedNss = nss;
nsStruct.executionNss = std::move(nss);
- auto status = ClusterAggregate::runAggregate(txn, nsStruct, cmdObj, options, &result);
+ auto status = ClusterAggregate::runAggregate(opCtx, nsStruct, cmdObj, options, &result);
appendCommandStatus(result, status);
return status.isOK();
}
diff --git a/src/mongo/s/commands/cluster_plan_cache_cmd.cpp b/src/mongo/s/commands/cluster_plan_cache_cmd.cpp
index fea10e3de88..b29eff8b0b2 100644
--- a/src/mongo/s/commands/cluster_plan_cache_cmd.cpp
+++ b/src/mongo/s/commands/cluster_plan_cache_cmd.cpp
@@ -86,7 +86,7 @@ public:
}
// Cluster plan cache command entry point.
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -110,7 +110,7 @@ private:
// Cluster plan cache command implementation(s) below
//
-bool ClusterPlanCacheCmd::run(OperationContext* txn,
+bool ClusterPlanCacheCmd::run(OperationContext* opCtx,
const std::string& dbName,
BSONObj& cmdObj,
int options,
@@ -124,7 +124,7 @@ bool ClusterPlanCacheCmd::run(OperationContext* txn,
vector<Strategy::CommandResult> results;
const BSONObj query;
Strategy::commandOp(
- txn, dbName, cmdObj, options, nss.ns(), query, CollationSpec::kSimpleSpec, &results);
+ opCtx, dbName, cmdObj, options, nss.ns(), query, CollationSpec::kSimpleSpec, &results);
// Set value of first shard result's "ok" field.
bool clusterCmdResult = true;
diff --git a/src/mongo/s/commands/cluster_profile_cmd.cpp b/src/mongo/s/commands/cluster_profile_cmd.cpp
index dcdc63d4bb6..74d6bf57cde 100644
--- a/src/mongo/s/commands/cluster_profile_cmd.cpp
+++ b/src/mongo/s/commands/cluster_profile_cmd.cpp
@@ -58,7 +58,7 @@ public:
out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/cluster_remove_shard_cmd.cpp b/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
index 6aeee9bd7e7..edf4ca92c57 100644
--- a/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
+++ b/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
@@ -79,7 +79,7 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -91,7 +91,7 @@ public:
cmdObj.firstElement().type() == BSONType::String);
const string target = cmdObj.firstElement().str();
- const auto shardStatus = grid.shardRegistry()->getShard(txn, ShardId(target));
+ const auto shardStatus = grid.shardRegistry()->getShard(opCtx, ShardId(target));
if (!shardStatus.isOK()) {
string msg(str::stream() << "Could not drop shard '" << target
<< "' because it does not exist");
@@ -100,15 +100,15 @@ public:
}
const auto s = shardStatus.getValue();
- auto catalogClient = grid.catalogClient(txn);
+ auto catalogClient = grid.catalogClient(opCtx);
StatusWith<ShardDrainingStatus> removeShardResult =
- catalogClient->removeShard(txn, s->getId());
+ catalogClient->removeShard(opCtx, s->getId());
if (!removeShardResult.isOK()) {
return appendCommandStatus(result, removeShardResult.getStatus());
}
vector<string> databases;
- Status status = catalogClient->getDatabasesForShard(txn, s->getId(), &databases);
+ Status status = catalogClient->getDatabasesForShard(opCtx, s->getId(), &databases);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -142,7 +142,7 @@ public:
case ShardDrainingStatus::ONGOING: {
vector<ChunkType> chunks;
Status status =
- catalogClient->getChunks(txn,
+ catalogClient->getChunks(opCtx,
BSON(ChunkType::shard(s->getId().toString())),
BSONObj(),
boost::none, // return all
diff --git a/src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp b/src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp
index 028f79e32ae..18337e99d29 100644
--- a/src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp
+++ b/src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp
@@ -103,7 +103,7 @@ public:
return Status::OK();
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -116,9 +116,9 @@ public:
parsedRequest.appendAsConfigCommand(&cmdBuilder);
cmdBuilder.append("writeConcern", kMajorityWriteConcern.toBSON());
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto cmdResponseStatus = uassertStatusOK(
- configShard->runCommandWithFixedRetryAttempts(txn,
+ configShard->runCommandWithFixedRetryAttempts(opCtx,
kPrimaryOnlyReadPreference,
"admin",
cmdBuilder.obj(),
diff --git a/src/mongo/s/commands/cluster_repl_set_get_status_cmd.cpp b/src/mongo/s/commands/cluster_repl_set_get_status_cmd.cpp
index 5c342b14489..e3474ed8393 100644
--- a/src/mongo/s/commands/cluster_repl_set_get_status_cmd.cpp
+++ b/src/mongo/s/commands/cluster_repl_set_get_status_cmd.cpp
@@ -64,7 +64,7 @@ public:
return Status::OK();
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/cluster_reset_error_cmd.cpp b/src/mongo/s/commands/cluster_reset_error_cmd.cpp
index 899d51b658e..cad0ffe8740 100644
--- a/src/mongo/s/commands/cluster_reset_error_cmd.cpp
+++ b/src/mongo/s/commands/cluster_reset_error_cmd.cpp
@@ -59,7 +59,7 @@ public:
// No auth required
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/cluster_set_feature_compatibility_version_cmd.cpp b/src/mongo/s/commands/cluster_set_feature_compatibility_version_cmd.cpp
index 54a6d9d9bb7..59b2d6ef071 100644
--- a/src/mongo/s/commands/cluster_set_feature_compatibility_version_cmd.cpp
+++ b/src/mongo/s/commands/cluster_set_feature_compatibility_version_cmd.cpp
@@ -83,7 +83,7 @@ public:
return Status::OK();
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -93,9 +93,9 @@ public:
FeatureCompatibilityVersionCommandParser::extractVersionFromCommand(getName(), cmdObj));
// Forward to config shard, which will forward to all shards.
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto response = uassertStatusOK(configShard->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
dbname,
BSON("_configsvrSetFeatureCompatibilityVersion" << version),
diff --git a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
index 79f792e0c7b..7692e764e02 100644
--- a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
+++ b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
@@ -116,7 +116,7 @@ BSONObj createIndexDoc(const std::string& ns,
/**
* Used only for writes to the config server, config and admin databases.
*/
-Status clusterCreateIndex(OperationContext* txn,
+Status clusterCreateIndex(OperationContext* opCtx,
const std::string& ns,
const BSONObj& keys,
const BSONObj& collation,
@@ -134,7 +134,7 @@ Status clusterCreateIndex(OperationContext* txn,
BatchedCommandResponse response;
ClusterWriter writer(false, 0);
- writer.write(txn, request, &response);
+ writer.write(opCtx, request, &response);
return response.toStatus();
}
@@ -177,7 +177,7 @@ public:
return parseNsFullyQualified(dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -185,10 +185,10 @@ public:
BSONObjBuilder& result) override {
const NamespaceString nss(parseNs(dbname, cmdObj));
- auto const catalogClient = Grid::get(txn)->catalogClient(txn);
- auto const shardRegistry = Grid::get(txn)->shardRegistry();
+ auto const catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
+ auto const shardRegistry = Grid::get(opCtx)->shardRegistry();
- auto scopedShardedDb = uassertStatusOK(ScopedShardDatabase::getExisting(txn, nss.db()));
+ auto scopedShardedDb = uassertStatusOK(ScopedShardDatabase::getExisting(opCtx, nss.db()));
const auto config = scopedShardedDb.db();
// Ensure sharding is allowed on the database
@@ -238,7 +238,7 @@ public:
bsonExtractTypedField(cmdObj, "collation", BSONType::Object, &collationElement);
if (collationStatus.isOK()) {
// Ensure that the collation is valid. Currently we only allow the simple collation.
- auto collator = CollatorFactoryInterface::get(txn->getServiceContext())
+ auto collator = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(collationElement.Obj());
if (!collator.getStatus().isOK()) {
return appendCommandStatus(result, collator.getStatus());
@@ -281,7 +281,7 @@ public:
// The rest of the checks require a connection to the primary db
const ConnectionString shardConnString = [&]() {
const auto shard =
- uassertStatusOK(shardRegistry->getShard(txn, config->getPrimaryId()));
+ uassertStatusOK(shardRegistry->getShard(opCtx, config->getPrimaryId()));
return shard->getConnString();
}();
@@ -503,7 +503,7 @@ public:
BSONObj collationArg =
!defaultCollation.isEmpty() ? CollationSpec::kSimpleSpec : BSONObj();
Status status =
- clusterCreateIndex(txn, nss.ns(), proposedKey, collationArg, careAboutUnique);
+ clusterCreateIndex(opCtx, nss.ns(), proposedKey, collationArg, careAboutUnique);
if (!status.isOK()) {
errmsg = str::stream() << "ensureIndex failed to create index on "
<< "primary shard: " << status.reason();
@@ -582,7 +582,7 @@ public:
audit::logShardCollection(Client::getCurrent(), nss.ns(), proposedKey, careAboutUnique);
- uassertStatusOK(catalogClient->shardCollection(txn,
+ uassertStatusOK(catalogClient->shardCollection(opCtx,
nss.ns(),
proposedShardKey,
defaultCollation,
@@ -591,7 +591,7 @@ public:
std::set<ShardId>{}));
// Make sure the cached metadata for the collection knows that we are now sharded
- config->getChunkManager(txn, nss.ns(), true /* reload */);
+ config->getChunkManager(opCtx, nss.ns(), true /* reload */);
result << "collectionsharded" << nss.ns();
@@ -599,14 +599,14 @@ public:
if (isHashedShardKey && isEmpty) {
// Reload the new config info. If we created more than one initial chunk, then
// we need to move them around to balance.
- auto chunkManager = config->getChunkManager(txn, nss.ns(), true);
+ auto chunkManager = config->getChunkManager(opCtx, nss.ns(), true);
ChunkMap chunkMap = chunkManager->getChunkMap();
// 2. Move and commit each "big chunk" to a different shard.
int i = 0;
for (ChunkMap::const_iterator c = chunkMap.begin(); c != chunkMap.end(); ++c, ++i) {
const ShardId& shardId = shardIds[i % numShards];
- const auto toStatus = shardRegistry->getShard(txn, shardId);
+ const auto toStatus = shardRegistry->getShard(opCtx, shardId);
if (!toStatus.isOK()) {
continue;
}
@@ -627,10 +627,10 @@ public:
chunkType.setVersion(chunkManager->getVersion());
Status moveStatus = configsvr_client::moveChunk(
- txn,
+ opCtx,
chunkType,
to->getId(),
- Grid::get(txn)->getBalancerConfiguration()->getMaxChunkSizeBytes(),
+ Grid::get(opCtx)->getBalancerConfiguration()->getMaxChunkSizeBytes(),
MigrationSecondaryThrottleOptions::create(
MigrationSecondaryThrottleOptions::kOff),
true);
@@ -646,7 +646,7 @@ public:
}
// Reload the config info, after all the migrations
- chunkManager = config->getChunkManager(txn, nss.ns(), true);
+ chunkManager = config->getChunkManager(opCtx, nss.ns(), true);
// 3. Subdivide the big chunks by splitting at each of the points in "allSplits"
// that we haven't already split by.
@@ -658,7 +658,7 @@ public:
if (i == allSplits.size() || !currentChunk->containsKey(allSplits[i])) {
if (!subSplits.empty()) {
auto splitStatus = shardutil::splitChunkAtMultiplePoints(
- txn,
+ opCtx,
currentChunk->getShardId(),
nss,
chunkManager->getShardKeyPattern(),
@@ -692,7 +692,7 @@ public:
// Proactively refresh the chunk manager. Not really necessary, but this way it's
// immediately up-to-date the next time it's used.
- config->getChunkManager(txn, nss.ns(), true);
+ config->getChunkManager(opCtx, nss.ns(), true);
}
return true;
diff --git a/src/mongo/s/commands/cluster_shutdown_cmd.cpp b/src/mongo/s/commands/cluster_shutdown_cmd.cpp
index 72f0fd71e6f..95db8c0c478 100644
--- a/src/mongo/s/commands/cluster_shutdown_cmd.cpp
+++ b/src/mongo/s/commands/cluster_shutdown_cmd.cpp
@@ -41,7 +41,7 @@ public:
<< "either (1) ran from localhost or (2) authenticated.";
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/cluster_split_cmd.cpp b/src/mongo/s/commands/cluster_split_cmd.cpp
index 57e5a54881b..b63da3b2ee7 100644
--- a/src/mongo/s/commands/cluster_split_cmd.cpp
+++ b/src/mongo/s/commands/cluster_split_cmd.cpp
@@ -55,7 +55,7 @@ namespace {
* Asks the mongod holding this chunk to find a key that approximately divides the specified chunk
* in two. Throws on error or if the chunk is empty.
*/
-BSONObj selectMedianKey(OperationContext* txn,
+BSONObj selectMedianKey(OperationContext* opCtx,
const ShardId& shardId,
const NamespaceString& nss,
const ShardKeyPattern& shardKeyPattern,
@@ -66,10 +66,10 @@ BSONObj selectMedianKey(OperationContext* txn,
chunkRange.append(&cmd);
cmd.appendBool("force", true);
- auto shard = uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, shardId));
+ auto shard = uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId));
auto cmdResponse = uassertStatusOK(
- shard->runCommandWithFixedRetryAttempts(txn,
+ shard->runCommandWithFixedRetryAttempts(opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"admin",
cmd.obj(),
@@ -126,7 +126,7 @@ public:
return parseNsFullyQualified(dbname, cmdObj);
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -134,7 +134,7 @@ public:
BSONObjBuilder& result) {
const NamespaceString nss(parseNs(dbname, cmdObj));
- auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(txn, nss));
+ auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(opCtx, nss));
const BSONField<BSONObj> findField("find", BSONObj());
const BSONField<BSONArray> boundsField("bounds", BSONArray());
@@ -197,7 +197,7 @@ public:
if (!find.isEmpty()) {
// find
BSONObj shardKey =
- uassertStatusOK(cm->getShardKeyPattern().extractShardKeyFromQuery(txn, find));
+ uassertStatusOK(cm->getShardKeyPattern().extractShardKeyFromQuery(opCtx, find));
if (shardKey.isEmpty()) {
errmsg = stream() << "no shard key found in chunk query " << find;
return false;
@@ -255,7 +255,7 @@ public:
// middle of the chunk.
const BSONObj splitPoint = !middle.isEmpty()
? middle
- : selectMedianKey(txn,
+ : selectMedianKey(opCtx,
chunk->getShardId(),
nss,
cm->getShardKeyPattern(),
@@ -267,7 +267,7 @@ public:
<< redact(splitPoint);
uassertStatusOK(
- shardutil::splitChunkAtMultiplePoints(txn,
+ shardutil::splitChunkAtMultiplePoints(opCtx,
chunk->getShardId(),
nss,
cm->getShardKeyPattern(),
@@ -277,7 +277,7 @@ public:
// Proactively refresh the chunk manager. Not strictly necessary, but this way it's
// immediately up-to-date the next time it's used.
- scopedCM.db()->getChunkManagerIfExists(txn, nss.ns(), true);
+ scopedCM.db()->getChunkManagerIfExists(opCtx, nss.ns(), true);
return true;
}
diff --git a/src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp b/src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp
index bd4c28dc8f8..f1616334ac0 100644
--- a/src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp
+++ b/src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp
@@ -118,7 +118,7 @@ public:
return Status::OK();
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -131,9 +131,9 @@ public:
parsedRequest.appendAsConfigCommand(&cmdBuilder);
cmdBuilder.append("writeConcern", kMajorityWriteConcern.toBSON());
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto cmdResponseStatus = uassertStatusOK(
- configShard->runCommandWithFixedRetryAttempts(txn,
+ configShard->runCommandWithFixedRetryAttempts(opCtx,
kPrimaryOnlyReadPreference,
"admin",
cmdBuilder.obj(),
diff --git a/src/mongo/s/commands/cluster_user_management_commands.cpp b/src/mongo/s/commands/cluster_user_management_commands.cpp
index 69aee76c4d3..18aa7736fe7 100644
--- a/src/mongo/s/commands/cluster_user_management_commands.cpp
+++ b/src/mongo/s/commands/cluster_user_management_commands.cpp
@@ -86,14 +86,14 @@ public:
return auth::checkAuthForCreateUserCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- return Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ return Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
}
virtual void redactForLogging(mutablebson::Document* cmdObj) {
@@ -125,7 +125,7 @@ public:
return auth::checkAuthForUpdateUserCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -136,8 +136,8 @@ public:
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -175,7 +175,7 @@ public:
return auth::checkAuthForDropUserCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -186,8 +186,8 @@ public:
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -221,14 +221,14 @@ public:
return auth::checkAuthForDropAllUsersFromDatabaseCommand(client, dbname);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -262,7 +262,7 @@ public:
return auth::checkAuthForGrantRolesToUserCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -275,8 +275,8 @@ public:
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -310,7 +310,7 @@ public:
return auth::checkAuthForRevokeRolesFromUserCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -323,8 +323,8 @@ public:
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -362,14 +362,14 @@ public:
return auth::checkAuthForUsersInfoCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- return Grid::get(txn)->catalogClient(txn)->runUserManagementReadCommand(
- txn, dbname, cmdObj, &result);
+ return Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementReadCommand(
+ opCtx, dbname, cmdObj, &result);
}
} cmdUsersInfo;
@@ -397,14 +397,14 @@ public:
return auth::checkAuthForCreateRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- return Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ return Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
}
} cmdCreateRole;
@@ -432,14 +432,14 @@ public:
return auth::checkAuthForUpdateRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -473,14 +473,14 @@ public:
return auth::checkAuthForGrantPrivilegesToRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -514,14 +514,14 @@ public:
return auth::checkAuthForRevokePrivilegesFromRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -555,14 +555,14 @@ public:
return auth::checkAuthForGrantRolesToRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -596,14 +596,14 @@ public:
return auth::checkAuthForRevokeRolesFromRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -640,14 +640,14 @@ public:
return auth::checkAuthForDropRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -685,14 +685,14 @@ public:
return auth::checkAuthForDropAllRolesFromDatabaseCommand(client, dbname);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -730,14 +730,14 @@ public:
return auth::checkAuthForRolesInfoCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- return Grid::get(txn)->catalogClient(txn)->runUserManagementReadCommand(
- txn, dbname, cmdObj, &result);
+ return Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementReadCommand(
+ opCtx, dbname, cmdObj, &result);
}
} cmdRolesInfo;
@@ -769,7 +769,7 @@ public:
return auth::checkAuthForInvalidateUserCacheCommand(client);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -820,14 +820,14 @@ public:
return auth::checkAuthForMergeAuthzCollectionsCommand(client, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- return Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ return Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
}
} cmdMergeAuthzCollections;
@@ -838,7 +838,7 @@ public:
*
* Returned error indicates a failure.
*/
-Status runUpgradeOnAllShards(OperationContext* txn, int maxSteps, BSONObjBuilder& result) {
+Status runUpgradeOnAllShards(OperationContext* opCtx, int maxSteps, BSONObjBuilder& result) {
BSONObjBuilder cmdObjBuilder;
cmdObjBuilder.append("authSchemaUpgrade", 1);
cmdObjBuilder.append("maxSteps", maxSteps);
@@ -847,19 +847,19 @@ Status runUpgradeOnAllShards(OperationContext* txn, int maxSteps, BSONObjBuilder
const BSONObj cmdObj = cmdObjBuilder.done();
// Upgrade each shard in turn, stopping on first failure.
- auto shardRegistry = Grid::get(txn)->shardRegistry();
- shardRegistry->reload(txn);
+ auto shardRegistry = Grid::get(opCtx)->shardRegistry();
+ shardRegistry->reload(opCtx);
vector<ShardId> shardIds;
shardRegistry->getAllShardIds(&shardIds);
bool hasWCError = false;
for (const auto& shardId : shardIds) {
- auto shardStatus = shardRegistry->getShard(txn, shardId);
+ auto shardStatus = shardRegistry->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
auto cmdResult = shardStatus.getValue()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"admin",
cmdObj,
@@ -910,15 +910,15 @@ public:
return auth::checkAuthForAuthSchemaUpgradeCommand(client);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
// Run the authSchemaUpgrade command on the config servers
- if (!Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result)) {
+ if (!Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result)) {
return false;
}
@@ -930,7 +930,7 @@ public:
// Optionally run the authSchemaUpgrade command on the individual shards
if (parsedArgs.shouldUpgradeShards) {
- status = runUpgradeOnAllShards(txn, parsedArgs.maxSteps, result);
+ status = runUpgradeOnAllShards(opCtx, parsedArgs.maxSteps, result);
if (!status.isOK()) {
// If the status is a write concern error, append a writeConcernError instead of
// and error message.
diff --git a/src/mongo/s/commands/cluster_whats_my_uri_cmd.cpp b/src/mongo/s/commands/cluster_whats_my_uri_cmd.cpp
index 9a029d0f861..7f3e5e19c11 100644
--- a/src/mongo/s/commands/cluster_whats_my_uri_cmd.cpp
+++ b/src/mongo/s/commands/cluster_whats_my_uri_cmd.cpp
@@ -57,7 +57,7 @@ public:
// No auth required
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/cluster_write.cpp b/src/mongo/s/commands/cluster_write.cpp
index 3468ca6dcf4..730d5e8a178 100644
--- a/src/mongo/s/commands/cluster_write.cpp
+++ b/src/mongo/s/commands/cluster_write.cpp
@@ -66,9 +66,9 @@ void toBatchError(const Status& status, BatchedCommandResponse* response) {
dassert(response->isValid(NULL));
}
-void reloadChunkManager(OperationContext* txn, const NamespaceString& nss) {
- auto config = uassertStatusOK(ScopedShardDatabase::getExisting(txn, nss.db()));
- config.db()->getChunkManagerIfExists(txn, nss.ns(), true);
+void reloadChunkManager(OperationContext* opCtx, const NamespaceString& nss) {
+ auto config = uassertStatusOK(ScopedShardDatabase::getExisting(opCtx, nss.db()));
+ config.db()->getChunkManagerIfExists(opCtx, nss.ns(), true);
}
/**
@@ -104,7 +104,7 @@ uint64_t calculateDesiredChunkSize(uint64_t maxChunkSizeBytes, uint64_t numChunk
* ordered list of ascending/descending field names. For example {a : 1, b : -1} is not special, but
* {a : "hashed"} is.
*/
-BSONObj findExtremeKeyForShard(OperationContext* txn,
+BSONObj findExtremeKeyForShard(OperationContext* opCtx,
const NamespaceString& nss,
const ShardId& shardId,
const ShardKeyPattern& shardKeyPattern,
@@ -130,7 +130,8 @@ BSONObj findExtremeKeyForShard(OperationContext* txn,
// Find the extreme key
const auto shardConnStr = [&]() {
- const auto shard = uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, shardId));
+ const auto shard =
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId));
return shard->getConnString();
}();
@@ -172,8 +173,10 @@ BSONObj findExtremeKeyForShard(OperationContext* txn,
/**
* Splits the chunks touched based from the targeter stats if needed.
*/
-void splitIfNeeded(OperationContext* txn, const NamespaceString& nss, const TargeterStats& stats) {
- auto scopedCMStatus = ScopedChunkManager::get(txn, nss);
+void splitIfNeeded(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const TargeterStats& stats) {
+ auto scopedCMStatus = ScopedChunkManager::get(opCtx, nss);
if (!scopedCMStatus.isOK()) {
warning() << "failed to get collection information for " << nss
<< " while checking for auto-split" << causedBy(scopedCMStatus.getStatus());
@@ -196,7 +199,7 @@ void splitIfNeeded(OperationContext* txn, const NamespaceString& nss, const Targ
return;
}
- updateChunkWriteStatsAndSplitIfNeeded(txn, scopedCM.cm().get(), chunk.get(), it->second);
+ updateChunkWriteStatsAndSplitIfNeeded(opCtx, scopedCM.cm().get(), chunk.get(), it->second);
}
}
@@ -205,7 +208,7 @@ void splitIfNeeded(OperationContext* txn, const NamespaceString& nss, const Targ
ClusterWriter::ClusterWriter(bool autoSplit, int timeoutMillis)
: _autoSplit(autoSplit), _timeoutMillis(timeoutMillis) {}
-void ClusterWriter::write(OperationContext* txn,
+void ClusterWriter::write(OperationContext* opCtx,
const BatchedCommandRequest& origRequest,
BatchedCommandResponse* response) {
// Add _ids to insert request if req'd
@@ -291,14 +294,14 @@ void ClusterWriter::write(OperationContext* txn,
request = requestWithWriteConcern.get();
}
- Grid::get(txn)->catalogClient(txn)->writeConfigServerDirect(txn, *request, response);
+ Grid::get(opCtx)->catalogClient(opCtx)->writeConfigServerDirect(opCtx, *request, response);
} else {
TargeterStats targeterStats;
{
ChunkManagerTargeter targeter(request->getTargetingNSS(), &targeterStats);
- Status targetInitStatus = targeter.init(txn);
+ Status targetInitStatus = targeter.init(opCtx);
if (!targetInitStatus.isOK()) {
toBatchError(Status(targetInitStatus.code(),
str::stream()
@@ -313,11 +316,11 @@ void ClusterWriter::write(OperationContext* txn,
DBClientMultiCommand dispatcher;
BatchWriteExec exec(&targeter, &dispatcher);
- exec.executeBatch(txn, *request, response, &_stats);
+ exec.executeBatch(opCtx, *request, response, &_stats);
}
if (_autoSplit) {
- splitIfNeeded(txn, request->getNS(), targeterStats);
+ splitIfNeeded(opCtx, request->getNS(), targeterStats);
}
}
}
@@ -326,7 +329,7 @@ const BatchWriteExecStats& ClusterWriter::getStats() {
return _stats;
}
-void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* txn,
+void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* opCtx,
ChunkManager* manager,
Chunk* chunk,
long dataWritten) {
@@ -334,7 +337,7 @@ void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* txn,
// bubbled up on the client connection doing a write.
LastError::Disabled d(&LastError::get(cc()));
- const auto balancerConfig = Grid::get(txn)->getBalancerConfiguration();
+ const auto balancerConfig = Grid::get(opCtx)->getBalancerConfiguration();
const bool minIsInf =
(0 == manager->getShardKeyPattern().getKeyPattern().globalMin().woCompare(chunk->getMin()));
@@ -370,7 +373,7 @@ void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* txn,
try {
// Ensure we have the most up-to-date balancer configuration
- uassertStatusOK(balancerConfig->refreshAndCheck(txn));
+ uassertStatusOK(balancerConfig->refreshAndCheck(opCtx));
if (!balancerConfig->getShouldAutoSplit()) {
return;
@@ -393,7 +396,7 @@ void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* txn,
}();
auto splitPoints =
- uassertStatusOK(shardutil::selectChunkSplitPoints(txn,
+ uassertStatusOK(shardutil::selectChunkSplitPoints(opCtx,
chunk->getShardId(),
nss,
manager->getShardKeyPattern(),
@@ -425,13 +428,13 @@ void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* txn,
if (KeyPattern::isOrderedKeyPattern(manager->getShardKeyPattern().toBSON())) {
if (minIsInf) {
BSONObj key = findExtremeKeyForShard(
- txn, nss, chunk->getShardId(), manager->getShardKeyPattern(), true);
+ opCtx, nss, chunk->getShardId(), manager->getShardKeyPattern(), true);
if (!key.isEmpty()) {
splitPoints.front() = key.getOwned();
}
} else if (maxIsInf) {
BSONObj key = findExtremeKeyForShard(
- txn, nss, chunk->getShardId(), manager->getShardKeyPattern(), false);
+ opCtx, nss, chunk->getShardId(), manager->getShardKeyPattern(), false);
if (!key.isEmpty()) {
splitPoints.back() = key.getOwned();
}
@@ -439,7 +442,7 @@ void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* txn,
}
const auto suggestedMigrateChunk =
- uassertStatusOK(shardutil::splitChunkAtMultiplePoints(txn,
+ uassertStatusOK(shardutil::splitChunkAtMultiplePoints(opCtx,
chunk->getShardId(),
nss,
manager->getShardKeyPattern(),
@@ -454,7 +457,7 @@ void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* txn,
return false;
auto collStatus =
- Grid::get(txn)->catalogClient(txn)->getCollection(txn, manager->getns());
+ Grid::get(opCtx)->catalogClient(opCtx)->getCollection(opCtx, manager->getns());
if (!collStatus.isOK()) {
log() << "Auto-split for " << nss << " failed to load collection metadata"
<< causedBy(redact(collStatus.getStatus()));
@@ -470,7 +473,7 @@ void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* txn,
(shouldBalance ? ")" : ", but no migrations allowed)"));
if (!shouldBalance || !suggestedMigrateChunk) {
- reloadChunkManager(txn, nss);
+ reloadChunkManager(opCtx, nss);
return;
}
@@ -482,7 +485,7 @@ void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* txn,
// We need to use the latest chunk manager (after the split) in order to have the most
// up-to-date view of the chunk we are about to move
- auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(txn, nss));
+ auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(opCtx, nss));
auto suggestedChunk = scopedCM.cm()->findIntersectingChunkWithSimpleCollation(
suggestedMigrateChunk->getMin());
@@ -493,9 +496,9 @@ void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* txn,
chunkToMove.setMax(suggestedChunk->getMax());
chunkToMove.setVersion(suggestedChunk->getLastmod());
- uassertStatusOK(configsvr_client::rebalanceChunk(txn, chunkToMove));
+ uassertStatusOK(configsvr_client::rebalanceChunk(opCtx, chunkToMove));
- reloadChunkManager(txn, nss);
+ reloadChunkManager(opCtx, nss);
} catch (const DBException& ex) {
chunk->randomizeBytesWritten();
diff --git a/src/mongo/s/commands/cluster_write.h b/src/mongo/s/commands/cluster_write.h
index 80f4a325ddf..cf41b20bcb5 100644
--- a/src/mongo/s/commands/cluster_write.h
+++ b/src/mongo/s/commands/cluster_write.h
@@ -43,7 +43,7 @@ class ClusterWriter {
public:
ClusterWriter(bool autoSplit, int timeoutMillis);
- void write(OperationContext* txn,
+ void write(OperationContext* opCtx,
const BatchedCommandRequest& request,
BatchedCommandResponse* response);
@@ -61,7 +61,7 @@ private:
* max size of a shard attempt to split the chunk. This call is opportunistic and swallows any
* errors.
*/
-void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* txn,
+void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* opCtx,
ChunkManager* manager,
Chunk* chunk,
long dataWritten);
diff --git a/src/mongo/s/commands/cluster_write_cmd.cpp b/src/mongo/s/commands/cluster_write_cmd.cpp
index 270510163d5..d8a05b779b6 100644
--- a/src/mongo/s/commands/cluster_write_cmd.cpp
+++ b/src/mongo/s/commands/cluster_write_cmd.cpp
@@ -92,7 +92,7 @@ public:
return status;
}
- virtual Status explain(OperationContext* txn,
+ virtual Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -120,16 +120,16 @@ public:
BatchItemRef targetingBatchItem(&request, 0);
vector<Strategy::CommandResult> shardResults;
Status status =
- _commandOpWrite(txn, dbname, explainCmdBob.obj(), targetingBatchItem, &shardResults);
+ _commandOpWrite(opCtx, dbname, explainCmdBob.obj(), targetingBatchItem, &shardResults);
if (!status.isOK()) {
return status;
}
return ClusterExplain::buildExplainResult(
- txn, shardResults, ClusterExplain::kWriteOnShards, timer.millis(), out);
+ opCtx, shardResults, ClusterExplain::kWriteOnShards, timer.millis(), out);
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -152,7 +152,7 @@ public:
response.setErrCode(ErrorCodes::FailedToParse);
response.setErrMessage(errmsg);
} else {
- writer.write(txn, request, &response);
+ writer.write(opCtx, request, &response);
}
dassert(response.isValid(NULL));
@@ -220,7 +220,7 @@ private:
*
* Does *not* retry or retarget if the metadata is stale.
*/
- static Status _commandOpWrite(OperationContext* txn,
+ static Status _commandOpWrite(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& command,
BatchItemRef targetingBatchItem,
@@ -230,7 +230,7 @@ private:
TargeterStats stats;
ChunkManagerTargeter targeter(
NamespaceString(targetingBatchItem.getRequest()->getTargetingNS()), &stats);
- Status status = targeter.init(txn);
+ Status status = targeter.init(opCtx);
if (!status.isOK())
return status;
@@ -239,22 +239,25 @@ private:
if (targetingBatchItem.getOpType() == BatchedCommandRequest::BatchType_Insert) {
ShardEndpoint* endpoint;
- Status status = targeter.targetInsert(txn, targetingBatchItem.getDocument(), &endpoint);
+ Status status =
+ targeter.targetInsert(opCtx, targetingBatchItem.getDocument(), &endpoint);
if (!status.isOK())
return status;
endpoints.push_back(endpoint);
} else if (targetingBatchItem.getOpType() == BatchedCommandRequest::BatchType_Update) {
- Status status = targeter.targetUpdate(txn, *targetingBatchItem.getUpdate(), &endpoints);
+ Status status =
+ targeter.targetUpdate(opCtx, *targetingBatchItem.getUpdate(), &endpoints);
if (!status.isOK())
return status;
} else {
invariant(targetingBatchItem.getOpType() == BatchedCommandRequest::BatchType_Delete);
- Status status = targeter.targetDelete(txn, *targetingBatchItem.getDelete(), &endpoints);
+ Status status =
+ targeter.targetDelete(opCtx, *targetingBatchItem.getDelete(), &endpoints);
if (!status.isOK())
return status;
}
- auto shardRegistry = Grid::get(txn)->shardRegistry();
+ auto shardRegistry = Grid::get(opCtx)->shardRegistry();
// Assemble requests
std::vector<AsyncRequestsSender::Request> requests;
@@ -262,7 +265,7 @@ private:
++it) {
const ShardEndpoint* endpoint = *it;
- auto shardStatus = shardRegistry->getShard(txn, endpoint->shardName);
+ auto shardStatus = shardRegistry->getShard(opCtx, endpoint->shardName);
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
@@ -272,12 +275,12 @@ private:
// Send the requests and wait to receive all the responses.
const ReadPreferenceSetting readPref(ReadPreference::PrimaryOnly, TagSet());
- AsyncRequestsSender ars(txn,
- Grid::get(txn)->getExecutorPool()->getArbitraryExecutor(),
+ AsyncRequestsSender ars(opCtx,
+ Grid::get(opCtx)->getExecutorPool()->getArbitraryExecutor(),
dbName,
requests,
readPref);
- auto responses = ars.waitForResponses(txn);
+ auto responses = ars.waitForResponses(opCtx);
// Parse the responses.
@@ -294,7 +297,7 @@ private:
invariant(response.shardHostAndPort);
result.target = ConnectionString(std::move(*response.shardHostAndPort));
- auto shardStatus = shardRegistry->getShard(txn, result.target.toString());
+ auto shardStatus = shardRegistry->getShard(opCtx, result.target.toString());
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index e5324266d35..7d14499e9d8 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -85,13 +85,14 @@ using std::vector;
namespace {
-bool cursorCommandPassthrough(OperationContext* txn,
+bool cursorCommandPassthrough(OperationContext* opCtx,
shared_ptr<DBConfig> conf,
const BSONObj& cmdObj,
const NamespaceString& nss,
int options,
BSONObjBuilder* out) {
- const auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, conf->getPrimaryId());
+ const auto shardStatus =
+ Grid::get(opCtx)->shardRegistry()->getShard(opCtx, conf->getPrimaryId());
if (!shardStatus.isOK()) {
invariant(shardStatus.getStatus() == ErrorCodes::ShardNotFound);
return Command::appendCommandStatus(*out, shardStatus.getStatus());
@@ -119,12 +120,12 @@ bool cursorCommandPassthrough(OperationContext* txn,
}
StatusWith<BSONObj> transformedResponse =
- storePossibleCursor(txn,
+ storePossibleCursor(opCtx,
HostAndPort(cursor->originalHost()),
response,
nss,
- Grid::get(txn)->getExecutorPool()->getArbitraryExecutor(),
- Grid::get(txn)->getCursorManager());
+ Grid::get(opCtx)->getExecutorPool()->getArbitraryExecutor(),
+ Grid::get(opCtx)->getCursorManager());
if (!transformedResponse.isOK()) {
return Command::appendCommandStatus(*out, transformedResponse.getStatus());
}
@@ -172,37 +173,37 @@ public:
// all grid commands are designed not to lock
protected:
- bool passthrough(OperationContext* txn,
+ bool passthrough(OperationContext* opCtx,
DBConfig* conf,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
- return _passthrough(txn, conf->name(), conf, cmdObj, 0, result);
+ return _passthrough(opCtx, conf->name(), conf, cmdObj, 0, result);
}
- bool adminPassthrough(OperationContext* txn,
+ bool adminPassthrough(OperationContext* opCtx,
DBConfig* conf,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
- return _passthrough(txn, "admin", conf, cmdObj, 0, result);
+ return _passthrough(opCtx, "admin", conf, cmdObj, 0, result);
}
- bool passthrough(OperationContext* txn,
+ bool passthrough(OperationContext* opCtx,
DBConfig* conf,
const BSONObj& cmdObj,
int options,
BSONObjBuilder& result) {
- return _passthrough(txn, conf->name(), conf, cmdObj, options, result);
+ return _passthrough(opCtx, conf->name(), conf, cmdObj, options, result);
}
private:
- bool _passthrough(OperationContext* txn,
+ bool _passthrough(OperationContext* opCtx,
const string& db,
DBConfig* conf,
const BSONObj& cmdObj,
int options,
BSONObjBuilder& result) {
const auto shardStatus =
- Grid::get(txn)->shardRegistry()->getShard(txn, conf->getPrimaryId());
+ Grid::get(opCtx)->shardRegistry()->getShard(opCtx, conf->getPrimaryId());
const auto shard = uassertStatusOK(shardStatus);
ShardConnection conn(shard->getConnString(), "");
@@ -229,13 +230,13 @@ public:
bool implicitCreateDb = false)
: RunOnAllShardsCommand(n, oldname, useShardConn, implicitCreateDb) {}
- virtual void getShardIds(OperationContext* txn,
+ virtual void getShardIds(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
vector<ShardId>& shardIds) {
const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
- auto status = Grid::get(txn)->catalogCache()->getDatabase(txn, dbName);
+ auto status = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName);
uassertStatusOK(status.getStatus());
shared_ptr<DBConfig> conf = status.getValue();
@@ -243,7 +244,7 @@ public:
if (!conf->isSharded(nss.ns())) {
shardIds.push_back(conf->getPrimaryId());
} else {
- Grid::get(txn)->shardRegistry()->getAllShardIds(&shardIds);
+ Grid::get(opCtx)->shardRegistry()->getAllShardIds(&shardIds);
}
}
};
@@ -252,7 +253,7 @@ class NotAllowedOnShardedCollectionCmd : public PublicGridCommand {
public:
NotAllowedOnShardedCollectionCmd(const char* n) : PublicGridCommand(n) {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int options,
@@ -260,9 +261,9 @@ public:
BSONObjBuilder& result) {
const NamespaceString nss(parseNs(dbName, cmdObj));
- auto conf = uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(txn, dbName));
+ auto conf = uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName));
if (!conf->isSharded(nss.ns())) {
- return passthrough(txn, conf.get(), cmdObj, options, result);
+ return passthrough(opCtx, conf.get(), cmdObj, options, result);
}
return appendCommandStatus(
@@ -451,7 +452,7 @@ public:
return false;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int options,
@@ -459,18 +460,24 @@ public:
BSONObjBuilder& output) {
const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
- auto conf = uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(txn, dbName));
+ auto conf = uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName));
if (!conf->isSharded(nss.ns())) {
- return passthrough(txn, conf.get(), cmdObj, output);
+ return passthrough(opCtx, conf.get(), cmdObj, output);
}
- shared_ptr<ChunkManager> cm = conf->getChunkManager(txn, nss.ns());
+ shared_ptr<ChunkManager> cm = conf->getChunkManager(opCtx, nss.ns());
massert(40051, "chunk manager should not be null", cm);
vector<Strategy::CommandResult> results;
const BSONObj query;
- Strategy::commandOp(
- txn, dbName, cmdObj, options, cm->getns(), query, CollationSpec::kSimpleSpec, &results);
+ Strategy::commandOp(opCtx,
+ dbName,
+ cmdObj,
+ options,
+ cm->getns(),
+ query,
+ CollationSpec::kSimpleSpec,
+ &results);
BSONObjBuilder rawResBuilder(output.subobjStart("raw"));
bool isValid = true;
@@ -519,19 +526,19 @@ public:
virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
return true;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int,
string& errmsg,
BSONObjBuilder& result) {
- auto dbStatus = ScopedShardDatabase::getOrCreate(txn, dbName);
+ auto dbStatus = ScopedShardDatabase::getOrCreate(opCtx, dbName);
if (!dbStatus.isOK()) {
return appendCommandStatus(result, dbStatus.getStatus());
}
auto scopedDb = std::move(dbStatus.getValue());
- return passthrough(txn, scopedDb.db(), cmdObj, result);
+ return passthrough(opCtx, scopedDb.db(), cmdObj, result);
}
} createCmd;
@@ -550,7 +557,7 @@ public:
virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
return true;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int,
@@ -567,7 +574,7 @@ public:
const string dbNameFrom = fullnsFrom.db().toString();
auto confFrom =
- uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(txn, dbNameFrom));
+ uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbNameFrom));
const auto fullnsToElt = cmdObj["to"];
uassert(ErrorCodes::InvalidNamespace,
@@ -578,7 +585,8 @@ public:
str::stream() << "Invalid target namespace: " << fullnsTo.ns(),
fullnsTo.isValid());
const string dbNameTo = fullnsTo.db().toString();
- auto confTo = uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(txn, dbNameTo));
+ auto confTo =
+ uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbNameTo));
uassert(
13138, "You can't rename a sharded collection", !confFrom->isSharded(fullnsFrom.ns()));
@@ -592,7 +600,7 @@ public:
"Source and destination collections must be on same shard",
shardFrom == shardTo);
- return adminPassthrough(txn, confFrom.get(), cmdObj, result);
+ return adminPassthrough(opCtx, confFrom.get(), cmdObj, result);
}
} renameCollectionCmd;
@@ -614,7 +622,7 @@ public:
return true;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int options,
@@ -629,14 +637,14 @@ public:
"Invalid todb argument",
NamespaceString::validDBName(todb, NamespaceString::DollarInDbNameBehavior::Allow));
- auto scopedToDb = uassertStatusOK(ScopedShardDatabase::getOrCreate(txn, todb));
+ auto scopedToDb = uassertStatusOK(ScopedShardDatabase::getOrCreate(opCtx, todb));
uassert(ErrorCodes::IllegalOperation,
"Cannot copy to a sharded database",
!scopedToDb.db()->isShardingEnabled());
const string fromhost = cmdObj.getStringField("fromhost");
if (!fromhost.empty()) {
- return adminPassthrough(txn, scopedToDb.db(), cmdObj, result);
+ return adminPassthrough(opCtx, scopedToDb.db(), cmdObj, result);
}
const auto fromDbElt = cmdObj["fromdb"];
@@ -649,7 +657,7 @@ public:
"invalid fromdb argument",
NamespaceString::validDBName(fromdb, NamespaceString::DollarInDbNameBehavior::Allow));
- auto scopedFromDb = uassertStatusOK(ScopedShardDatabase::getExisting(txn, fromdb));
+ auto scopedFromDb = uassertStatusOK(ScopedShardDatabase::getExisting(opCtx, fromdb));
uassert(ErrorCodes::IllegalOperation,
"Cannot copy from a sharded database",
!scopedFromDb.db()->isShardingEnabled());
@@ -662,12 +670,12 @@ public:
}
{
- const auto shard = uassertStatusOK(
- Grid::get(txn)->shardRegistry()->getShard(txn, scopedFromDb.db()->getPrimaryId()));
+ const auto shard = uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(
+ opCtx, scopedFromDb.db()->getPrimaryId()));
b.append("fromhost", shard->getConnString().toString());
}
- return adminPassthrough(txn, scopedToDb.db(), b.obj(), result);
+ return adminPassthrough(opCtx, scopedToDb.db(), b.obj(), result);
}
} clusterCopyDBCmd;
@@ -687,7 +695,7 @@ public:
return false;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int,
@@ -695,17 +703,17 @@ public:
BSONObjBuilder& result) {
const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
- auto conf = uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(txn, dbName));
+ auto conf = uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName));
if (!conf->isSharded(nss.ns())) {
result.appendBool("sharded", false);
result.append("primary", conf->getPrimaryId().toString());
- return passthrough(txn, conf.get(), cmdObj, result);
+ return passthrough(opCtx, conf.get(), cmdObj, result);
}
result.appendBool("sharded", true);
- shared_ptr<ChunkManager> cm = conf->getChunkManager(txn, nss.ns());
+ shared_ptr<ChunkManager> cm = conf->getChunkManager(opCtx, nss.ns());
massert(12594, "how could chunk manager be null!", cm);
BSONObjBuilder shardStats;
@@ -720,7 +728,7 @@ public:
set<ShardId> shardIds;
cm->getAllShardIds(&shardIds);
for (const ShardId& shardId : shardIds) {
- const auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ const auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
invariant(shardStatus.getStatus() == ErrorCodes::ShardNotFound);
continue;
@@ -865,7 +873,7 @@ public:
virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
return false;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int,
@@ -874,12 +882,12 @@ public:
const string fullns = parseNs(dbName, cmdObj);
const string nsDBName = nsToDatabase(fullns);
- auto conf = uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(txn, nsDBName));
+ auto conf = uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, nsDBName));
if (!conf->isSharded(fullns)) {
- return passthrough(txn, conf.get(), cmdObj, result);
+ return passthrough(opCtx, conf.get(), cmdObj, result);
}
- shared_ptr<ChunkManager> cm = conf->getChunkManager(txn, fullns);
+ shared_ptr<ChunkManager> cm = conf->getChunkManager(opCtx, fullns);
massert(13407, "how could chunk manager be null!", cm);
BSONObj min = cmdObj.getObjectField("min");
@@ -909,7 +917,7 @@ public:
cm->getShardIdsForRange(min, max, &shardIds);
for (const ShardId& shardId : shardIds) {
- const auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ const auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
invariant(shardStatus.getStatus() == ErrorCodes::ShardNotFound);
continue;
@@ -990,7 +998,7 @@ public:
return nss.ns();
}
- Status explain(OperationContext* txn,
+ Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -1013,7 +1021,7 @@ public:
// Note that this implementation will not handle targeting retries and fails when the
// sharding metadata is too stale
- auto status = Grid::get(txn)->catalogCache()->getDatabase(txn, nss.db());
+ auto status = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, nss.db());
if (!status.isOK()) {
return Status(status.getStatus().code(),
str::stream() << "Passthrough command failed: " << command.toString()
@@ -1033,7 +1041,7 @@ public:
}
const auto primaryShardStatus =
- Grid::get(txn)->shardRegistry()->getShard(txn, conf->getPrimaryId());
+ Grid::get(opCtx)->shardRegistry()->getShard(opCtx, conf->getPrimaryId());
if (!primaryShardStatus.isOK()) {
return primaryShardStatus.getStatus();
}
@@ -1064,7 +1072,7 @@ public:
cmdResult.target = primaryShardStatus.getValue()->getConnString();
return ClusterExplain::buildExplainResult(
- txn, {cmdResult}, ClusterExplain::kSingleShard, timer.millis(), out);
+ opCtx, {cmdResult}, ClusterExplain::kSingleShard, timer.millis(), out);
}
} groupCmd;
@@ -1088,7 +1096,7 @@ public:
}
return Status::OK();
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int options,
@@ -1099,7 +1107,8 @@ public:
errmsg = str::stream() << "doing a splitVector across dbs isn't supported via mongos";
return false;
}
- return NotAllowedOnShardedCollectionCmd::run(txn, dbName, cmdObj, options, errmsg, result);
+ return NotAllowedOnShardedCollectionCmd::run(
+ opCtx, dbName, cmdObj, options, errmsg, result);
}
virtual std::string parseNs(const string& dbname, const BSONObj& cmdObj) const {
return parseNsFullyQualified(dbname, cmdObj);
@@ -1127,7 +1136,7 @@ public:
return false;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int options,
@@ -1135,7 +1144,7 @@ public:
BSONObjBuilder& result) {
const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
- auto status = Grid::get(txn)->catalogCache()->getDatabase(txn, dbName);
+ auto status = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName);
if (!status.isOK()) {
return appendEmptyResultSet(result, status.getStatus(), nss.ns());
}
@@ -1143,7 +1152,7 @@ public:
shared_ptr<DBConfig> conf = status.getValue();
if (!conf->isSharded(nss.ns())) {
- if (passthrough(txn, conf.get(), cmdObj, options, result)) {
+ if (passthrough(opCtx, conf.get(), cmdObj, options, result)) {
return true;
}
@@ -1153,7 +1162,7 @@ public:
result.resetToEmpty();
auto parsedDistinct = ParsedDistinct::parse(
- txn, resolvedView.getNamespace(), cmdObj, ExtensionsCallbackNoop(), false);
+ opCtx, resolvedView.getNamespace(), cmdObj, ExtensionsCallbackNoop(), false);
if (!parsedDistinct.isOK()) {
return appendCommandStatus(result, parsedDistinct.getStatus());
}
@@ -1170,7 +1179,7 @@ public:
BSONObjBuilder aggResult;
Command::findCommand("aggregate")
- ->run(txn, dbName, aggCmd.getValue(), options, errmsg, aggResult);
+ ->run(opCtx, dbName, aggCmd.getValue(), options, errmsg, aggResult);
ViewResponseFormatter formatter(aggResult.obj());
auto formatStatus = formatter.appendAsDistinctResponse(&result);
@@ -1183,7 +1192,7 @@ public:
return false;
}
- shared_ptr<ChunkManager> cm = conf->getChunkManager(txn, nss.ns());
+ shared_ptr<ChunkManager> cm = conf->getChunkManager(opCtx, nss.ns());
massert(10420, "how could chunk manager be null!", cm);
BSONObj query = getQuery(cmdObj);
@@ -1195,7 +1204,7 @@ public:
// Construct collator for deduping.
std::unique_ptr<CollatorInterface> collator;
if (!queryCollation.getValue().isEmpty()) {
- auto statusWithCollator = CollatorFactoryInterface::get(txn->getServiceContext())
+ auto statusWithCollator = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(queryCollation.getValue());
if (!statusWithCollator.isOK()) {
return appendEmptyResultSet(result, statusWithCollator.getStatus(), nss.ns());
@@ -1204,7 +1213,7 @@ public:
}
set<ShardId> shardIds;
- cm->getShardIdsForQuery(txn, query, queryCollation.getValue(), &shardIds);
+ cm->getShardIdsForQuery(opCtx, query, queryCollation.getValue(), &shardIds);
BSONObjComparator bsonCmp(BSONObj(),
BSONObjComparator::FieldNamesMode::kConsider,
@@ -1213,7 +1222,7 @@ public:
BSONObjSet all = bsonCmp.makeBSONObjSet();
for (const ShardId& shardId : shardIds) {
- const auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ const auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
invariant(shardStatus.getStatus() == ErrorCodes::ShardNotFound);
continue;
@@ -1248,7 +1257,7 @@ public:
return true;
}
- Status explain(OperationContext* txn,
+ Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -1287,7 +1296,7 @@ public:
Timer timer;
vector<Strategy::CommandResult> shardResults;
- Strategy::commandOp(txn,
+ Strategy::commandOp(opCtx,
dbname,
explainCmdBob.obj(),
options,
@@ -1302,7 +1311,7 @@ public:
ResolvedView::isResolvedViewErrorResponse(shardResults[0].result)) {
auto resolvedView = ResolvedView::fromBSON(shardResults[0].result);
auto parsedDistinct = ParsedDistinct::parse(
- txn, resolvedView.getNamespace(), cmdObj, ExtensionsCallbackNoop(), true);
+ opCtx, resolvedView.getNamespace(), cmdObj, ExtensionsCallbackNoop(), true);
if (!parsedDistinct.isOK()) {
return parsedDistinct.getStatus();
}
@@ -1319,7 +1328,7 @@ public:
std::string errMsg;
if (Command::findCommand("aggregate")
- ->run(txn, dbname, aggCmd.getValue(), 0, errMsg, *out)) {
+ ->run(opCtx, dbname, aggCmd.getValue(), 0, errMsg, *out)) {
return Status::OK();
}
@@ -1329,7 +1338,7 @@ public:
const char* mongosStageName = ClusterExplain::getStageNameForReadOp(shardResults, cmdObj);
return ClusterExplain::buildExplainResult(
- txn, shardResults, mongosStageName, millisElapsed, out);
+ opCtx, shardResults, mongosStageName, millisElapsed, out);
}
} disinctCmd;
@@ -1364,7 +1373,7 @@ public:
return false;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int,
@@ -1372,12 +1381,12 @@ public:
BSONObjBuilder& result) {
const NamespaceString nss(parseNs(dbName, cmdObj));
- auto conf = uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(txn, dbName));
+ auto conf = uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName));
if (!conf->isSharded(nss.ns())) {
- return passthrough(txn, conf.get(), cmdObj, result);
+ return passthrough(opCtx, conf.get(), cmdObj, result);
}
- shared_ptr<ChunkManager> cm = conf->getChunkManager(txn, nss.ns());
+ shared_ptr<ChunkManager> cm = conf->getChunkManager(opCtx, nss.ns());
massert(13091, "how could chunk manager be null!", cm);
if (SimpleBSONObjComparator::kInstance.evaluate(cm->getShardKeyPattern().toBSON() ==
BSON("files_id" << 1))) {
@@ -1385,7 +1394,7 @@ public:
vector<Strategy::CommandResult> results;
Strategy::commandOp(
- txn, dbName, cmdObj, 0, nss.ns(), finder, CollationSpec::kSimpleSpec, &results);
+ opCtx, dbName, cmdObj, 0, nss.ns(), finder, CollationSpec::kSimpleSpec, &results);
verify(results.size() == 1); // querying on shard key so should only talk to one shard
BSONObj res = results.begin()->result;
@@ -1418,7 +1427,7 @@ public:
vector<Strategy::CommandResult> results;
try {
- Strategy::commandOp(txn,
+ Strategy::commandOp(opCtx,
dbName,
shardCmd,
0,
@@ -1505,7 +1514,7 @@ public:
return false;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int options,
@@ -1513,12 +1522,12 @@ public:
BSONObjBuilder& result) {
const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
- auto conf = uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(txn, dbName));
+ auto conf = uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName));
if (!conf->isSharded(nss.ns())) {
- return passthrough(txn, conf.get(), cmdObj, options, result);
+ return passthrough(opCtx, conf.get(), cmdObj, options, result);
}
- shared_ptr<ChunkManager> cm = conf->getChunkManager(txn, nss.ns());
+ shared_ptr<ChunkManager> cm = conf->getChunkManager(opCtx, nss.ns());
massert(13500, "how could chunk manager be null!", cm);
BSONObj query = getQuery(cmdObj);
@@ -1527,7 +1536,7 @@ public:
return appendEmptyResultSet(result, collation.getStatus(), nss.ns());
}
set<ShardId> shardIds;
- cm->getShardIdsForQuery(txn, query, collation.getValue(), &shardIds);
+ cm->getShardIdsForQuery(opCtx, query, collation.getValue(), &shardIds);
// We support both "num" and "limit" options to control limit
int limit = 100;
@@ -1538,7 +1547,7 @@ public:
list<shared_ptr<Future::CommandResult>> futures;
BSONArrayBuilder shardArray;
for (const ShardId& shardId : shardIds) {
- const auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ const auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
invariant(shardStatus.getStatus() == ErrorCodes::ShardNotFound);
continue;
@@ -1559,7 +1568,7 @@ public:
i != futures.end();
i++) {
shared_ptr<Future::CommandResult> res = *i;
- if (!res->join(txn)) {
+ if (!res->join(opCtx)) {
errmsg = res->result()["errmsg"].String();
if (res->result().hasField("code")) {
result.append(res->result()["code"]);
@@ -1634,7 +1643,7 @@ public:
virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
return false;
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int,
@@ -1657,7 +1666,7 @@ public:
virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
return false;
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int,
@@ -1669,13 +1678,13 @@ public:
// $eval isn't allowed to access sharded collections, but we need to leave the
// shard to detect that.
- auto status = Grid::get(txn)->catalogCache()->getDatabase(txn, dbName);
+ auto status = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName);
if (!status.isOK()) {
return appendCommandStatus(result, status.getStatus());
}
shared_ptr<DBConfig> conf = status.getValue();
- return passthrough(txn, conf.get(), cmdObj, result);
+ return passthrough(opCtx, conf.get(), cmdObj, result);
}
} evalCmd;
@@ -1706,7 +1715,7 @@ public:
return false;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int options,
@@ -1714,12 +1723,12 @@ public:
BSONObjBuilder& result) final {
auto nss = NamespaceString::makeListCollectionsNSS(dbName);
- auto conf = Grid::get(txn)->catalogCache()->getDatabase(txn, dbName);
+ auto conf = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName);
if (!conf.isOK()) {
return appendEmptyResultSet(result, conf.getStatus(), dbName + ".$cmd.listCollections");
}
- return cursorCommandPassthrough(txn, conf.getValue(), cmdObj, nss, options, &result);
+ return cursorCommandPassthrough(opCtx, conf.getValue(), cmdObj, nss, options, &result);
}
} cmdListCollections;
@@ -1752,13 +1761,13 @@ public:
return false;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) final {
- auto conf = Grid::get(txn)->catalogCache()->getDatabase(txn, dbName);
+ auto conf = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName);
if (!conf.isOK()) {
return appendCommandStatus(result, conf.getStatus());
}
@@ -1768,7 +1777,8 @@ public:
NamespaceString::makeListIndexesNSS(targetNss.db(), targetNss.coll());
dassert(targetNss == commandNss.getTargetNSForListIndexes());
- return cursorCommandPassthrough(txn, conf.getValue(), cmdObj, commandNss, options, &result);
+ return cursorCommandPassthrough(
+ opCtx, conf.getValue(), cmdObj, commandNss, options, &result);
}
} cmdListIndexes;
diff --git a/src/mongo/s/commands/run_on_all_shards_cmd.cpp b/src/mongo/s/commands/run_on_all_shards_cmd.cpp
index 9b0b26cf14b..b534bf0628a 100644
--- a/src/mongo/s/commands/run_on_all_shards_cmd.cpp
+++ b/src/mongo/s/commands/run_on_all_shards_cmd.cpp
@@ -64,14 +64,14 @@ BSONObj RunOnAllShardsCommand::specialErrorHandler(const std::string& server,
return originalResult;
}
-void RunOnAllShardsCommand::getShardIds(OperationContext* txn,
+void RunOnAllShardsCommand::getShardIds(OperationContext* opCtx,
const std::string& db,
BSONObj& cmdObj,
std::vector<ShardId>& shardIds) {
grid.shardRegistry()->getAllShardIds(&shardIds);
}
-bool RunOnAllShardsCommand::run(OperationContext* txn,
+bool RunOnAllShardsCommand::run(OperationContext* opCtx,
const std::string& dbName,
BSONObj& cmdObj,
int options,
@@ -80,15 +80,15 @@ bool RunOnAllShardsCommand::run(OperationContext* txn,
LOG(1) << "RunOnAllShardsCommand db: " << dbName << " cmd:" << redact(cmdObj);
if (_implicitCreateDb) {
- uassertStatusOK(ScopedShardDatabase::getOrCreate(txn, dbName));
+ uassertStatusOK(ScopedShardDatabase::getOrCreate(opCtx, dbName));
}
std::vector<ShardId> shardIds;
- getShardIds(txn, dbName, cmdObj, shardIds);
+ getShardIds(opCtx, dbName, cmdObj, shardIds);
std::list<std::shared_ptr<Future::CommandResult>> futures;
for (const ShardId& shardId : shardIds) {
- const auto shardStatus = grid.shardRegistry()->getShard(txn, shardId);
+ const auto shardStatus = grid.shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
continue;
}
@@ -120,7 +120,7 @@ bool RunOnAllShardsCommand::run(OperationContext* txn,
++futuresit, ++shardIdsIt) {
std::shared_ptr<Future::CommandResult> res = *futuresit;
- if (res->join(txn)) {
+ if (res->join(opCtx)) {
// success :)
BSONObj result = res->result();
results.emplace_back(shardIdsIt->toString(), result);
diff --git a/src/mongo/s/commands/run_on_all_shards_cmd.h b/src/mongo/s/commands/run_on_all_shards_cmd.h
index f0c983b8578..a01c133b987 100644
--- a/src/mongo/s/commands/run_on_all_shards_cmd.h
+++ b/src/mongo/s/commands/run_on_all_shards_cmd.h
@@ -78,12 +78,12 @@ public:
const BSONObj& originalResult) const;
// The default implementation uses all shards.
- virtual void getShardIds(OperationContext* txn,
+ virtual void getShardIds(OperationContext* opCtx,
const std::string& db,
BSONObj& cmdObj,
std::vector<ShardId>& shardIds);
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& db,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp
index 148dcd66e05..a209b0a8f9b 100644
--- a/src/mongo/s/commands/strategy.cpp
+++ b/src/mongo/s/commands/strategy.cpp
@@ -82,7 +82,7 @@ using std::stringstream;
namespace {
-void runAgainstRegistered(OperationContext* txn,
+void runAgainstRegistered(OperationContext* opCtx,
const char* ns,
BSONObj& jsobj,
BSONObjBuilder& anObjBuilder,
@@ -104,14 +104,14 @@ void runAgainstRegistered(OperationContext* txn,
return;
}
- execCommandClient(txn, c, queryOptions, ns, jsobj, anObjBuilder);
+ execCommandClient(opCtx, c, queryOptions, ns, jsobj, anObjBuilder);
}
/**
* Called into by the web server. For now we just translate the parameters to their old style
* equivalents.
*/
-void execCommandHandler(OperationContext* txn,
+void execCommandHandler(OperationContext* opCtx,
Command* command,
const rpc::RequestInterface& request,
rpc::ReplyBuilderInterface* replyBuilder) {
@@ -124,7 +124,7 @@ void execCommandHandler(OperationContext* txn,
std::string db = request.getDatabase().rawData();
BSONObjBuilder result;
- execCommandClient(txn, command, queryFlags, request.getDatabase().rawData(), cmdObj, result);
+ execCommandClient(opCtx, command, queryFlags, request.getDatabase().rawData(), cmdObj, result);
replyBuilder->setCommandReply(result.done()).setMetadata(rpc::makeEmptyMetadata());
}
@@ -134,7 +134,7 @@ MONGO_INITIALIZER(InitializeCommandExecCommandHandler)(InitializerContext* const
return Status::OK();
}
-void registerErrorImpl(OperationContext* txn, const DBException& exception) {}
+void registerErrorImpl(OperationContext* opCtx, const DBException& exception) {}
MONGO_INITIALIZER(InitializeRegisterErrorHandler)(InitializerContext* const) {
Command::registerRegisterError(registerErrorImpl);
@@ -143,12 +143,12 @@ MONGO_INITIALIZER(InitializeRegisterErrorHandler)(InitializerContext* const) {
} // namespace
-void Strategy::queryOp(OperationContext* txn, const NamespaceString& nss, DbMessage* dbm) {
+void Strategy::queryOp(OperationContext* opCtx, const NamespaceString& nss, DbMessage* dbm) {
globalOpCounters.gotQuery();
const QueryMessage q(*dbm);
- Client* const client = txn->getClient();
+ Client* const client = opCtx->getClient();
AuthorizationSession* const authSession = AuthorizationSession::get(client);
Status status = authSession->checkAuthForFind(nss, false);
@@ -183,7 +183,7 @@ void Strategy::queryOp(OperationContext* txn, const NamespaceString& nss, DbMess
}();
auto canonicalQuery =
- uassertStatusOK(CanonicalQuery::canonicalize(txn, q, ExtensionsCallbackNoop()));
+ uassertStatusOK(CanonicalQuery::canonicalize(opCtx, q, ExtensionsCallbackNoop()));
// If the $explain flag was set, we must run the operation on the shards as an explain command
// rather than a find command.
@@ -199,7 +199,7 @@ void Strategy::queryOp(OperationContext* txn, const NamespaceString& nss, DbMess
BSONObjBuilder explainBuilder;
uassertStatusOK(Strategy::explainFind(
- txn, findCommand, queryRequest, verbosity, metadata, &explainBuilder));
+ opCtx, findCommand, queryRequest, verbosity, metadata, &explainBuilder));
BSONObj explainObj = explainBuilder.done();
replyToQuery(0, // query result flags
@@ -220,7 +220,7 @@ void Strategy::queryOp(OperationContext* txn, const NamespaceString& nss, DbMess
// 0 means the cursor is exhausted. Otherwise we assume that a cursor with the returned id can
// be retrieved via the ClusterCursorManager.
auto cursorId =
- ClusterFind::runQuery(txn,
+ ClusterFind::runQuery(opCtx,
*canonicalQuery,
readPreference,
&batch,
@@ -249,10 +249,12 @@ void Strategy::queryOp(OperationContext* txn, const NamespaceString& nss, DbMess
cursorId.getValue());
}
-void Strategy::clientCommandOp(OperationContext* txn, const NamespaceString& nss, DbMessage* dbm) {
+void Strategy::clientCommandOp(OperationContext* opCtx,
+ const NamespaceString& nss,
+ DbMessage* dbm) {
const QueryMessage q(*dbm);
- Client* const client = txn->getClient();
+ Client* const client = opCtx->getClient();
LOG(3) << "command: " << q.ns << " " << redact(q.query) << " ntoreturn: " << q.ntoreturn
<< " options: " << q.queryOptions;
@@ -282,7 +284,7 @@ void Strategy::clientCommandOp(OperationContext* txn, const NamespaceString& nss
const NamespaceString interposedNss("admin", "$cmd");
BSONObjBuilder reply;
runAgainstRegistered(
- txn, interposedNss.ns().c_str(), interposedCmd, reply, q.queryOptions);
+ opCtx, interposedNss.ns().c_str(), interposedCmd, reply, q.queryOptions);
replyToQuery(0, client->session(), dbm->msg(), reply.done());
};
@@ -336,7 +338,7 @@ void Strategy::clientCommandOp(OperationContext* txn, const NamespaceString& nss
const int maxTimeMS =
uassertStatusOK(QueryRequest::parseMaxTimeMS(cmdObj[QueryRequest::cmdOptionMaxTimeMS]));
if (maxTimeMS > 0) {
- txn->setDeadlineAfterNowBy(Milliseconds{maxTimeMS});
+ opCtx->setDeadlineAfterNowBy(Milliseconds{maxTimeMS});
}
int loops = 5;
@@ -346,7 +348,7 @@ void Strategy::clientCommandOp(OperationContext* txn, const NamespaceString& nss
OpQueryReplyBuilder reply;
{
BSONObjBuilder builder(reply.bufBuilderForResults());
- runAgainstRegistered(txn, q.ns, cmdObj, builder, q.queryOptions);
+ runAgainstRegistered(opCtx, q.ns, cmdObj, builder, q.queryOptions);
}
reply.sendCommandReply(client->session(), dbm->msg());
return;
@@ -361,13 +363,13 @@ void Strategy::clientCommandOp(OperationContext* txn, const NamespaceString& nss
// For legacy reasons, ns may not actually be set in the exception
const std::string staleNS(e.getns().empty() ? std::string(q.ns) : e.getns());
- ShardConnection::checkMyConnectionVersions(txn, staleNS);
+ ShardConnection::checkMyConnectionVersions(opCtx, staleNS);
if (loops < 4) {
// This throws out the entire database cache entry in response to
// StaleConfigException instead of just the collection which encountered it. There
// is no good reason for it other than the lack of lower-granularity cache
// invalidation.
- Grid::get(txn)->catalogCache()->invalidate(NamespaceString(staleNS).db());
+ Grid::get(opCtx)->catalogCache()->invalidate(NamespaceString(staleNS).db());
}
} catch (const DBException& e) {
OpQueryReplyBuilder reply;
@@ -381,7 +383,7 @@ void Strategy::clientCommandOp(OperationContext* txn, const NamespaceString& nss
}
}
-void Strategy::commandOp(OperationContext* txn,
+void Strategy::commandOp(OperationContext* opCtx,
const string& db,
const BSONObj& command,
int options,
@@ -395,7 +397,7 @@ void Strategy::commandOp(OperationContext* txn,
qSpec, CommandInfo(versionedNS, targetingQuery, targetingCollation));
// Initialize the cursor
- cursor.init(txn);
+ cursor.init(opCtx);
set<ShardId> shardIds;
cursor.getQueryShardIds(shardIds);
@@ -411,7 +413,7 @@ void Strategy::commandOp(OperationContext* txn,
}
}
-void Strategy::getMore(OperationContext* txn, const NamespaceString& nss, DbMessage* dbm) {
+void Strategy::getMore(OperationContext* opCtx, const NamespaceString& nss, DbMessage* dbm) {
const int ntoreturn = dbm->pullInt();
uassert(
34424, str::stream() << "Invalid ntoreturn for OP_GET_MORE: " << ntoreturn, ntoreturn >= 0);
@@ -419,12 +421,12 @@ void Strategy::getMore(OperationContext* txn, const NamespaceString& nss, DbMess
globalOpCounters.gotGetMore();
- Client* const client = txn->getClient();
+ Client* const client = opCtx->getClient();
// TODO: Handle stale config exceptions here from coll being dropped or sharded during op for
// now has same semantics as legacy request.
- auto statusGetDb = Grid::get(txn)->catalogCache()->getDatabase(txn, nss.db());
+ auto statusGetDb = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, nss.db());
if (statusGetDb == ErrorCodes::NamespaceNotFound) {
replyToQuery(ResultFlag_CursorNotFound, client->session(), dbm->msg(), 0, 0, 0);
return;
@@ -438,7 +440,7 @@ void Strategy::getMore(OperationContext* txn, const NamespaceString& nss, DbMess
GetMoreRequest getMoreRequest(nss, cursorId, batchSize, boost::none, boost::none, boost::none);
- auto cursorResponse = ClusterFind::runGetMore(txn, getMoreRequest);
+ auto cursorResponse = ClusterFind::runGetMore(opCtx, getMoreRequest);
if (cursorResponse == ErrorCodes::CursorNotFound) {
replyToQuery(ResultFlag_CursorNotFound, client->session(), dbm->msg(), 0, 0, 0);
return;
@@ -464,7 +466,7 @@ void Strategy::getMore(OperationContext* txn, const NamespaceString& nss, DbMess
cursorResponse.getValue().getCursorId());
}
-void Strategy::killCursors(OperationContext* txn, DbMessage* dbm) {
+void Strategy::killCursors(OperationContext* opCtx, DbMessage* dbm) {
const int numCursors = dbm->pullInt();
massert(34425,
str::stream() << "Invalid killCursors message. numCursors: " << numCursors
@@ -481,9 +483,9 @@ void Strategy::killCursors(OperationContext* txn, DbMessage* dbm) {
ConstDataCursor cursors(dbm->getArray(numCursors));
- Client* const client = txn->getClient();
+ Client* const client = opCtx->getClient();
AuthorizationSession* const authSession = AuthorizationSession::get(client);
- ClusterCursorManager* const manager = Grid::get(txn)->getCursorManager();
+ ClusterCursorManager* const manager = Grid::get(opCtx)->getCursorManager();
for (int i = 0; i < numCursors; ++i) {
const CursorId cursorId = cursors.readAndAdvance<LittleEndian<int64_t>>();
@@ -517,13 +519,13 @@ void Strategy::killCursors(OperationContext* txn, DbMessage* dbm) {
}
}
-void Strategy::writeOp(OperationContext* txn, DbMessage* dbm) {
+void Strategy::writeOp(OperationContext* opCtx, DbMessage* dbm) {
OwnedPointerVector<BatchedCommandRequest> commandRequestsOwned;
std::vector<BatchedCommandRequest*>& commandRequests = commandRequestsOwned.mutableVector();
msgToBatchRequests(dbm->msg(), &commandRequests);
- auto& clientLastError = LastError::get(txn->getClient());
+ auto& clientLastError = LastError::get(opCtx->getClient());
for (auto it = commandRequests.begin(); it != commandRequests.end(); ++it) {
// Multiple commands registered to last error as multiple requests
@@ -546,7 +548,7 @@ void Strategy::writeOp(OperationContext* txn, DbMessage* dbm) {
BSONObj commandBSON = commandRequest->toBSON();
BSONObjBuilder builder;
- runAgainstRegistered(txn, cmdNS.c_str(), commandBSON, builder, 0);
+ runAgainstRegistered(opCtx, cmdNS.c_str(), commandBSON, builder, 0);
bool parsed = commandResponse.parseBSON(builder.done(), nullptr);
(void)parsed; // for compile
@@ -566,7 +568,7 @@ void Strategy::writeOp(OperationContext* txn, DbMessage* dbm) {
}
}
-Status Strategy::explainFind(OperationContext* txn,
+Status Strategy::explainFind(OperationContext* opCtx,
const BSONObj& findCommand,
const QueryRequest& qr,
ExplainCommon::Verbosity verbosity,
@@ -581,7 +583,7 @@ Status Strategy::explainFind(OperationContext* txn,
Timer timer;
std::vector<Strategy::CommandResult> shardResults;
- Strategy::commandOp(txn,
+ Strategy::commandOp(opCtx,
qr.nss().db().toString(),
explainCmdBob.obj(),
options,
@@ -601,13 +603,13 @@ Status Strategy::explainFind(OperationContext* txn,
const char* mongosStageName = ClusterExplain::getStageNameForReadOp(shardResults, findCommand);
return ClusterExplain::buildExplainResult(
- txn, shardResults, mongosStageName, millisElapsed, out);
+ opCtx, shardResults, mongosStageName, millisElapsed, out);
}
/**
* Called into by the commands infrastructure.
*/
-void execCommandClient(OperationContext* txn,
+void execCommandClient(OperationContext* opCtx,
Command* c,
int queryOptions,
const char* ns,
@@ -624,7 +626,7 @@ void execCommandClient(OperationContext* txn,
return;
}
- Status status = Command::checkAuthorization(c, txn, dbname, cmdObj);
+ Status status = Command::checkAuthorization(c, opCtx, dbname, cmdObj);
if (!status.isOK()) {
Command::appendCommandStatus(result, status);
return;
@@ -657,20 +659,20 @@ void execCommandClient(OperationContext* txn,
// attach tracking
rpc::TrackingMetadata trackingMetadata;
trackingMetadata.initWithOperName(c->getName());
- rpc::TrackingMetadata::get(txn) = trackingMetadata;
+ rpc::TrackingMetadata::get(opCtx) = trackingMetadata;
std::string errmsg;
bool ok = false;
try {
if (!supportsWriteConcern) {
- ok = c->run(txn, dbname, cmdObj, queryOptions, errmsg, result);
+ ok = c->run(opCtx, dbname, cmdObj, queryOptions, errmsg, result);
} else {
// Change the write concern while running the command.
- const auto oldWC = txn->getWriteConcern();
- ON_BLOCK_EXIT([&] { txn->setWriteConcern(oldWC); });
- txn->setWriteConcern(wcResult.getValue());
+ const auto oldWC = opCtx->getWriteConcern();
+ ON_BLOCK_EXIT([&] { opCtx->setWriteConcern(oldWC); });
+ opCtx->setWriteConcern(wcResult.getValue());
- ok = c->run(txn, dbname, cmdObj, queryOptions, errmsg, result);
+ ok = c->run(opCtx, dbname, cmdObj, queryOptions, errmsg, result);
}
} catch (const DBException& e) {
result.resetToEmpty();
diff --git a/src/mongo/s/commands/strategy.h b/src/mongo/s/commands/strategy.h
index 2b55c80621f..51a0d1673cc 100644
--- a/src/mongo/s/commands/strategy.h
+++ b/src/mongo/s/commands/strategy.h
@@ -56,26 +56,26 @@ public:
*
* Must not be called with legacy '.$cmd' commands.
*/
- static void queryOp(OperationContext* txn, const NamespaceString& nss, DbMessage* dbm);
+ static void queryOp(OperationContext* opCtx, const NamespaceString& nss, DbMessage* dbm);
/**
* Handles a legacy-style getMore request and sends the response back on success (or cursor not
* found) or throws on error.
*/
- static void getMore(OperationContext* txn, const NamespaceString& nss, DbMessage* dbm);
+ static void getMore(OperationContext* opCtx, const NamespaceString& nss, DbMessage* dbm);
/**
* Handles a legacy-style killCursors request. Doesn't send any response on success or throws on
* error.
*/
- static void killCursors(OperationContext* txn, DbMessage* dbm);
+ static void killCursors(OperationContext* opCtx, DbMessage* dbm);
/**
* Handles a legacy-style write operation request and updates the last error state on the client
* with the result from the operation. Doesn't send any response back and does not throw on
* errors.
*/
- static void writeOp(OperationContext* txn, DbMessage* dbm);
+ static void writeOp(OperationContext* opCtx, DbMessage* dbm);
/**
* Executes a legacy-style ($cmd namespace) command. Does not throw and returns the response
@@ -84,7 +84,9 @@ public:
* Catches StaleConfigException errors and retries the command automatically after refreshing
* the metadata for the failing namespace.
*/
- static void clientCommandOp(OperationContext* txn, const NamespaceString& nss, DbMessage* dbm);
+ static void clientCommandOp(OperationContext* opCtx,
+ const NamespaceString& nss,
+ DbMessage* dbm);
/**
* Helper to run an explain of a find operation on the shards. Fills 'out' with the result of
@@ -94,7 +96,7 @@ public:
* Used both if mongos receives an explain command and if it receives an OP_QUERY find with the
* $explain modifier.
*/
- static Status explainFind(OperationContext* txn,
+ static Status explainFind(OperationContext* opCtx,
const BSONObj& findCommand,
const QueryRequest& qr,
ExplainCommon::Verbosity verbosity,
@@ -117,7 +119,7 @@ public:
* TODO: Replace these methods and all other methods of command dispatch with a more general
* command op framework.
*/
- static void commandOp(OperationContext* txn,
+ static void commandOp(OperationContext* opCtx,
const std::string& db,
const BSONObj& command,
int options,
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index c16f671ba78..f5aec193923 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -81,7 +81,7 @@ void DBConfig::markNSNotSharded(const std::string& ns) {
}
}
-std::shared_ptr<ChunkManager> DBConfig::getChunkManagerIfExists(OperationContext* txn,
+std::shared_ptr<ChunkManager> DBConfig::getChunkManagerIfExists(OperationContext* opCtx,
const std::string& ns,
bool shouldReload,
bool forceReload) {
@@ -89,13 +89,13 @@ std::shared_ptr<ChunkManager> DBConfig::getChunkManagerIfExists(OperationContext
LastError::Disabled ignoreForGLE(&LastError::get(cc()));
try {
- return getChunkManager(txn, ns, shouldReload, forceReload);
+ return getChunkManager(opCtx, ns, shouldReload, forceReload);
} catch (const DBException&) {
return nullptr;
}
}
-std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* txn,
+std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* opCtx,
const std::string& ns,
bool shouldReload,
bool forceReload) {
@@ -113,7 +113,7 @@ std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* txn,
// Note: read the _reloadCount inside the _lock mutex, so _loadIfNeeded will always
// be forced to perform a reload.
const auto currentReloadIteration = _reloadCount.load();
- _loadIfNeeded(txn, currentReloadIteration);
+ _loadIfNeeded(opCtx, currentReloadIteration);
it = _collections.find(ns);
}
@@ -139,8 +139,8 @@ std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* txn,
// currently
std::vector<ChunkType> newestChunk;
if (oldVersion.isSet() && !forceReload) {
- uassertStatusOK(Grid::get(txn)->catalogClient(txn)->getChunks(
- txn,
+ uassertStatusOK(Grid::get(opCtx)->catalogClient(opCtx)->getChunks(
+ opCtx,
BSON(ChunkType::ns(ns)),
BSON(ChunkType::DEPRECATED_lastmod() << -1),
1,
@@ -200,7 +200,7 @@ std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* txn,
oldManager->getShardKeyPattern(),
oldManager->getDefaultCollator() ? oldManager->getDefaultCollator()->clone() : nullptr,
oldManager->isUnique()));
- tempChunkManager->loadExistingRanges(txn, oldManager.get());
+ tempChunkManager->loadExistingRanges(opCtx, oldManager.get());
if (!tempChunkManager->numChunks()) {
// Maybe we're not sharded any more, so do a full reload
@@ -208,16 +208,16 @@ std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* txn,
const bool successful = [&]() {
stdx::lock_guard<stdx::mutex> lk(_lock);
- return _loadIfNeeded(txn, currentReloadIteration);
+ return _loadIfNeeded(opCtx, currentReloadIteration);
}();
// If we aren't successful loading the database entry, we don't want to keep the stale
// object around which has invalid data.
if (!successful) {
- Grid::get(txn)->catalogCache()->invalidate(_name);
+ Grid::get(opCtx)->catalogCache()->invalidate(_name);
}
- return getChunkManager(txn, ns);
+ return getChunkManager(opCtx, ns);
}
}
@@ -277,20 +277,20 @@ std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* txn,
return ci.cm;
}
-bool DBConfig::load(OperationContext* txn) {
+bool DBConfig::load(OperationContext* opCtx) {
const auto currentReloadIteration = _reloadCount.load();
stdx::lock_guard<stdx::mutex> lk(_lock);
- return _loadIfNeeded(txn, currentReloadIteration);
+ return _loadIfNeeded(opCtx, currentReloadIteration);
}
-bool DBConfig::_loadIfNeeded(OperationContext* txn, Counter reloadIteration) {
+bool DBConfig::_loadIfNeeded(OperationContext* opCtx, Counter reloadIteration) {
if (reloadIteration != _reloadCount.load()) {
return true;
}
- const auto catalogClient = Grid::get(txn)->catalogClient(txn);
+ const auto catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
- auto status = catalogClient->getDatabase(txn, _name);
+ auto status = catalogClient->getDatabase(opCtx, _name);
if (status == ErrorCodes::NamespaceNotFound) {
return false;
}
@@ -310,7 +310,7 @@ bool DBConfig::_loadIfNeeded(OperationContext* txn, Counter reloadIteration) {
std::vector<CollectionType> collections;
repl::OpTime configOpTimeWhenLoadingColl;
uassertStatusOK(
- catalogClient->getCollections(txn, &_name, &collections, &configOpTimeWhenLoadingColl));
+ catalogClient->getCollections(opCtx, &_name, &collections, &configOpTimeWhenLoadingColl));
invariant(configOpTimeWhenLoadingColl >= _configOpTime);
@@ -325,7 +325,7 @@ bool DBConfig::_loadIfNeeded(OperationContext* txn, Counter reloadIteration) {
if (!coll.getDropped()) {
std::unique_ptr<CollatorInterface> defaultCollator;
if (!coll.getDefaultCollation().isEmpty()) {
- auto statusWithCollator = CollatorFactoryInterface::get(txn->getServiceContext())
+ auto statusWithCollator = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(coll.getDefaultCollation());
// The collation was validated upon collection creation.
@@ -342,7 +342,7 @@ bool DBConfig::_loadIfNeeded(OperationContext* txn, Counter reloadIteration) {
coll.getUnique()));
// Do the blocking collection load
- manager->loadExistingRanges(txn, nullptr);
+ manager->loadExistingRanges(opCtx, nullptr);
// Collections with no chunks are unsharded, no matter what the collections entry says
if (manager->numChunks()) {
diff --git a/src/mongo/s/config.h b/src/mongo/s/config.h
index 2b0ecbb7d07..bbd63cf3b3b 100644
--- a/src/mongo/s/config.h
+++ b/src/mongo/s/config.h
@@ -80,11 +80,11 @@ public:
*/
bool isSharded(const std::string& ns);
- std::shared_ptr<ChunkManager> getChunkManager(OperationContext* txn,
+ std::shared_ptr<ChunkManager> getChunkManager(OperationContext* opCtx,
const std::string& ns,
bool reload = false,
bool forceReload = false);
- std::shared_ptr<ChunkManager> getChunkManagerIfExists(OperationContext* txn,
+ std::shared_ptr<ChunkManager> getChunkManagerIfExists(OperationContext* opCtx,
const std::string& ns,
bool reload = false,
bool forceReload = false);
@@ -93,7 +93,7 @@ public:
* Returns true if it is successful at loading the DBConfig, false if the database is not found,
* and throws on all other errors.
*/
- bool load(OperationContext* txn);
+ bool load(OperationContext* opCtx);
protected:
typedef std::map<std::string, CollectionInfo> CollectionInfoMap;
@@ -105,7 +105,7 @@ protected:
* Also returns true without reloading if reloadIteration is not equal to the _reloadCount.
* This is to avoid multiple threads attempting to reload do duplicate work.
*/
- bool _loadIfNeeded(OperationContext* txn, Counter reloadIteration);
+ bool _loadIfNeeded(OperationContext* opCtx, Counter reloadIteration);
// All member variables are labeled with one of the following codes indicating the
// synchronization rules for accessing them.
diff --git a/src/mongo/s/config_server_client.cpp b/src/mongo/s/config_server_client.cpp
index d3dd7f88dea..bf290d58199 100644
--- a/src/mongo/s/config_server_client.cpp
+++ b/src/mongo/s/config_server_client.cpp
@@ -43,16 +43,16 @@ const ReadPreferenceSetting kPrimaryOnlyReadPreference{ReadPreference::PrimaryOn
} // namespace
-Status moveChunk(OperationContext* txn,
+Status moveChunk(OperationContext* opCtx,
const ChunkType& chunk,
const ShardId& newShardId,
int64_t maxChunkSizeBytes,
const MigrationSecondaryThrottleOptions& secondaryThrottle,
bool waitForDelete) {
- auto shardRegistry = Grid::get(txn)->shardRegistry();
+ auto shardRegistry = Grid::get(opCtx)->shardRegistry();
auto shard = shardRegistry->getConfigShard();
auto cmdResponseStatus = shard->runCommand(
- txn,
+ opCtx,
kPrimaryOnlyReadPreference,
"admin",
BalanceChunkRequest::serializeToMoveCommandForConfig(
@@ -65,11 +65,11 @@ Status moveChunk(OperationContext* txn,
return cmdResponseStatus.getValue().commandStatus;
}
-Status rebalanceChunk(OperationContext* txn, const ChunkType& chunk) {
- auto shardRegistry = Grid::get(txn)->shardRegistry();
+Status rebalanceChunk(OperationContext* opCtx, const ChunkType& chunk) {
+ auto shardRegistry = Grid::get(opCtx)->shardRegistry();
auto shard = shardRegistry->getConfigShard();
auto cmdResponseStatus = shard->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
kPrimaryOnlyReadPreference,
"admin",
BalanceChunkRequest::serializeToRebalanceCommandForConfig(chunk),
diff --git a/src/mongo/s/config_server_client.h b/src/mongo/s/config_server_client.h
index ec92a493aca..4e825ca1408 100644
--- a/src/mongo/s/config_server_client.h
+++ b/src/mongo/s/config_server_client.h
@@ -46,7 +46,7 @@ namespace configsvr_client {
/**
* Requests the balancer to move the specified chunk off of its current shard to the new shard.
*/
-Status moveChunk(OperationContext* txn,
+Status moveChunk(OperationContext* opCtx,
const ChunkType& chunk,
const ShardId& newShardId,
int64_t maxChunkSizeBytes,
@@ -57,7 +57,7 @@ Status moveChunk(OperationContext* txn,
* Requests the balancer to move the specified chunk off of its current shard to a shard, considered
* more appropriate under the balancing policy which is currently in effect.
*/
-Status rebalanceChunk(OperationContext* txn, const ChunkType& chunk);
+Status rebalanceChunk(OperationContext* opCtx, const ChunkType& chunk);
} // namespace configsvr_client
} // namespace mongo
diff --git a/src/mongo/s/config_server_test_fixture.cpp b/src/mongo/s/config_server_test_fixture.cpp
index 65e8dec3fe8..4f1e6246646 100644
--- a/src/mongo/s/config_server_test_fixture.cpp
+++ b/src/mongo/s/config_server_test_fixture.cpp
@@ -174,7 +174,7 @@ std::shared_ptr<Shard> ConfigServerTestFixture::getConfigShard() const {
return shardRegistry()->getConfigShard();
}
-Status ConfigServerTestFixture::insertToConfigCollection(OperationContext* txn,
+Status ConfigServerTestFixture::insertToConfigCollection(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& doc) {
auto insert(stdx::make_unique<BatchedInsertRequest>());
@@ -186,7 +186,7 @@ Status ConfigServerTestFixture::insertToConfigCollection(OperationContext* txn,
auto config = getConfigShard();
invariant(config);
- auto insertResponse = config->runCommand(txn,
+ auto insertResponse = config->runCommand(opCtx,
kReadPref,
ns.db().toString(),
request.toBSON(),
@@ -198,14 +198,14 @@ Status ConfigServerTestFixture::insertToConfigCollection(OperationContext* txn,
return status;
}
-StatusWith<BSONObj> ConfigServerTestFixture::findOneOnConfigCollection(OperationContext* txn,
+StatusWith<BSONObj> ConfigServerTestFixture::findOneOnConfigCollection(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& filter) {
auto config = getConfigShard();
invariant(config);
auto findStatus = config->exhaustiveFindOnConfig(
- txn, kReadPref, repl::ReadConcernLevel::kMajorityReadConcern, ns, filter, BSONObj(), 1);
+ opCtx, kReadPref, repl::ReadConcernLevel::kMajorityReadConcern, ns, filter, BSONObj(), 1);
if (!findStatus.isOK()) {
return findStatus.getStatus();
}
@@ -231,10 +231,10 @@ Status ConfigServerTestFixture::setupShards(const std::vector<ShardType>& shards
return Status::OK();
}
-StatusWith<ShardType> ConfigServerTestFixture::getShardDoc(OperationContext* txn,
+StatusWith<ShardType> ConfigServerTestFixture::getShardDoc(OperationContext* opCtx,
const std::string& shardId) {
auto doc = findOneOnConfigCollection(
- txn, NamespaceString(ShardType::ConfigNS), BSON(ShardType::name(shardId)));
+ opCtx, NamespaceString(ShardType::ConfigNS), BSON(ShardType::name(shardId)));
if (!doc.isOK()) {
if (doc.getStatus() == ErrorCodes::NoMatchingDocument) {
return {ErrorCodes::ShardNotFound,
@@ -258,21 +258,21 @@ Status ConfigServerTestFixture::setupChunks(const std::vector<ChunkType>& chunks
return Status::OK();
}
-StatusWith<ChunkType> ConfigServerTestFixture::getChunkDoc(OperationContext* txn,
+StatusWith<ChunkType> ConfigServerTestFixture::getChunkDoc(OperationContext* opCtx,
const BSONObj& minKey) {
auto doc = findOneOnConfigCollection(
- txn, NamespaceString(ChunkType::ConfigNS), BSON(ChunkType::min() << minKey));
+ opCtx, NamespaceString(ChunkType::ConfigNS), BSON(ChunkType::min() << minKey));
if (!doc.isOK())
return doc.getStatus();
return ChunkType::fromConfigBSON(doc.getValue());
}
-StatusWith<std::vector<BSONObj>> ConfigServerTestFixture::getIndexes(OperationContext* txn,
+StatusWith<std::vector<BSONObj>> ConfigServerTestFixture::getIndexes(OperationContext* opCtx,
const NamespaceString& ns) {
auto configShard = getConfigShard();
- auto response = configShard->runCommand(txn,
+ auto response = configShard->runCommand(opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
ns.db().toString(),
BSON("listIndexes" << ns.coll().toString()),
diff --git a/src/mongo/s/config_server_test_fixture.h b/src/mongo/s/config_server_test_fixture.h
index 8d8bafa0c96..411b3672902 100644
--- a/src/mongo/s/config_server_test_fixture.h
+++ b/src/mongo/s/config_server_test_fixture.h
@@ -57,14 +57,14 @@ public:
/**
* Insert a document to this config server to the specified namespace.
*/
- Status insertToConfigCollection(OperationContext* txn,
+ Status insertToConfigCollection(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& doc);
/**
* Reads a single document from a collection living on the config server.
*/
- StatusWith<BSONObj> findOneOnConfigCollection(OperationContext* txn,
+ StatusWith<BSONObj> findOneOnConfigCollection(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& filter);
@@ -77,7 +77,7 @@ public:
* Retrieves the shard document from the config server.
* Returns {ErrorCodes::ShardNotFound} if the given shard does not exists.
*/
- StatusWith<ShardType> getShardDoc(OperationContext* txn, const std::string& shardId);
+ StatusWith<ShardType> getShardDoc(OperationContext* opCtx, const std::string& shardId);
/**
* Setup the config.chunks collection to contain the given chunks.
@@ -87,12 +87,12 @@ public:
/**
* Retrieves the chunk document from the config server.
*/
- StatusWith<ChunkType> getChunkDoc(OperationContext* txn, const BSONObj& minKey);
+ StatusWith<ChunkType> getChunkDoc(OperationContext* opCtx, const BSONObj& minKey);
/**
* Returns the indexes definitions defined on a given collection.
*/
- StatusWith<std::vector<BSONObj>> getIndexes(OperationContext* txn, const NamespaceString& ns);
+ StatusWith<std::vector<BSONObj>> getIndexes(OperationContext* opCtx, const NamespaceString& ns);
/**
* Returns the stored raw pointer to the addShard TaskExecutor's NetworkInterface.
diff --git a/src/mongo/s/grid.h b/src/mongo/s/grid.h
index 1562afebba7..4170e3ee152 100644
--- a/src/mongo/s/grid.h
+++ b/src/mongo/s/grid.h
@@ -98,7 +98,7 @@ public:
* Returns a pointer to a ShardingCatalogClient to use for accessing catalog data stored on the
* config servers.
*/
- ShardingCatalogClient* catalogClient(OperationContext* txn) {
+ ShardingCatalogClient* catalogClient(OperationContext* opCtx) {
return _catalogClient.get();
}
diff --git a/src/mongo/s/local_sharding_info.cpp b/src/mongo/s/local_sharding_info.cpp
index 8cea70e5032..7b8926bac65 100644
--- a/src/mongo/s/local_sharding_info.cpp
+++ b/src/mongo/s/local_sharding_info.cpp
@@ -61,10 +61,10 @@ void enableLocalShardingInfo(ServiceContext* context, Handler handler) {
forService(context).registerHandler(handler);
}
-bool haveLocalShardingInfo(OperationContext* txn, const std::string& ns) {
- auto handler = forService(txn->getServiceContext()).getHandler();
+bool haveLocalShardingInfo(OperationContext* opCtx, const std::string& ns) {
+ auto handler = forService(opCtx->getServiceContext()).getHandler();
if (handler)
- return handler(txn, ns);
+ return handler(opCtx, ns);
return false;
}
diff --git a/src/mongo/s/local_sharding_info.h b/src/mongo/s/local_sharding_info.h
index 42be1a0176b..9be721de42b 100644
--- a/src/mongo/s/local_sharding_info.h
+++ b/src/mongo/s/local_sharding_info.h
@@ -48,6 +48,6 @@ void enableLocalShardingInfo(ServiceContext* context,
/**
* @return true if we have any shard info for the ns
*/
-bool haveLocalShardingInfo(OperationContext* txn, const std::string& ns);
+bool haveLocalShardingInfo(OperationContext* opCtx, const std::string& ns);
} // namespace mongo
diff --git a/src/mongo/s/ns_targeter.h b/src/mongo/s/ns_targeter.h
index 62fd02e5e22..95e19a81a53 100644
--- a/src/mongo/s/ns_targeter.h
+++ b/src/mongo/s/ns_targeter.h
@@ -83,7 +83,7 @@ public:
*
* Returns !OK with message if document could not be targeted for other reasons.
*/
- virtual Status targetInsert(OperationContext* txn,
+ virtual Status targetInsert(OperationContext* opCtx,
const BSONObj& doc,
ShardEndpoint** endpoint) const = 0;
@@ -92,7 +92,7 @@ public:
*
* Returns OK and fills the endpoints; returns a status describing the error otherwise.
*/
- virtual Status targetUpdate(OperationContext* txn,
+ virtual Status targetUpdate(OperationContext* opCtx,
const BatchedUpdateDocument& updateDoc,
std::vector<ShardEndpoint*>* endpoints) const = 0;
@@ -101,7 +101,7 @@ public:
*
* Returns OK and fills the endpoints; returns a status describing the error otherwise.
*/
- virtual Status targetDelete(OperationContext* txn,
+ virtual Status targetDelete(OperationContext* opCtx,
const BatchedDeleteDocument& deleteDoc,
std::vector<ShardEndpoint*>* endpoints) const = 0;
@@ -147,7 +147,7 @@ public:
* NOTE: This function may block for shared resources or network calls.
* Returns !OK with message if could not refresh
*/
- virtual Status refreshIfNeeded(OperationContext* txn, bool* wasChanged) = 0;
+ virtual Status refreshIfNeeded(OperationContext* opCtx, bool* wasChanged) = 0;
};
/**
diff --git a/src/mongo/s/query/async_results_merger.cpp b/src/mongo/s/query/async_results_merger.cpp
index afcf5257db0..614002978f1 100644
--- a/src/mongo/s/query/async_results_merger.cpp
+++ b/src/mongo/s/query/async_results_merger.cpp
@@ -253,7 +253,7 @@ ClusterQueryResult AsyncResultsMerger::nextReadyUnsorted() {
return {};
}
-Status AsyncResultsMerger::askForNextBatch_inlock(OperationContext* txn, size_t remoteIndex) {
+Status AsyncResultsMerger::askForNextBatch_inlock(OperationContext* opCtx, size_t remoteIndex) {
auto& remote = _remotes[remoteIndex];
invariant(!remote.cbHandle.isValid());
@@ -291,14 +291,14 @@ Status AsyncResultsMerger::askForNextBatch_inlock(OperationContext* txn, size_t
}
executor::RemoteCommandRequest request(
- remote.getTargetHost(), _params->nsString.db().toString(), cmdObj, _metadataObj, txn);
+ remote.getTargetHost(), _params->nsString.db().toString(), cmdObj, _metadataObj, opCtx);
auto callbackStatus =
_executor->scheduleRemoteCommand(request,
stdx::bind(&AsyncResultsMerger::handleBatchResponse,
this,
stdx::placeholders::_1,
- txn,
+ opCtx,
remoteIndex));
if (!callbackStatus.isOK()) {
return callbackStatus.getStatus();
@@ -317,7 +317,7 @@ Status AsyncResultsMerger::askForNextBatch_inlock(OperationContext* txn, size_t
* 3. Remotes that reached maximum retries will be in 'exhausted' state.
*/
StatusWith<executor::TaskExecutor::EventHandle> AsyncResultsMerger::nextEvent(
- OperationContext* txn) {
+ OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_lifecycleState != kAlive) {
@@ -345,7 +345,7 @@ StatusWith<executor::TaskExecutor::EventHandle> AsyncResultsMerger::nextEvent(
// If we already have established a cursor with this remote, and there is no outstanding
// request for which we have a valid callback handle, then schedule work to retrieve the
// next batch.
- auto nextBatchStatus = askForNextBatch_inlock(txn, i);
+ auto nextBatchStatus = askForNextBatch_inlock(opCtx, i);
if (!nextBatchStatus.isOK()) {
return nextBatchStatus;
}
@@ -391,7 +391,7 @@ StatusWith<CursorResponse> AsyncResultsMerger::parseCursorResponse(const BSONObj
void AsyncResultsMerger::handleBatchResponse(
const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData,
- OperationContext* txn,
+ OperationContext* opCtx,
size_t remoteIndex) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
@@ -426,7 +426,7 @@ void AsyncResultsMerger::handleBatchResponse(
// If the event handle is invalid, then the executor is in the middle of shutting down,
// and we can't schedule any more work for it to complete.
if (_killCursorsScheduledEvent.isValid()) {
- scheduleKillCursors_inlock(txn);
+ scheduleKillCursors_inlock(opCtx);
_executor->signalEvent(_killCursorsScheduledEvent);
}
@@ -571,7 +571,7 @@ void AsyncResultsMerger::handleBatchResponse(
// We do not ask for the next batch if the cursor is tailable, as batches received from remote
// tailable cursors should be passed through to the client without asking for more batches.
if (!_params->isTailable && !remote.hasNext() && !remote.exhausted()) {
- remote.status = askForNextBatch_inlock(txn, remoteIndex);
+ remote.status = askForNextBatch_inlock(opCtx, remoteIndex);
if (!remote.status.isOK()) {
return;
}
@@ -602,7 +602,7 @@ bool AsyncResultsMerger::haveOutstandingBatchRequests_inlock() {
return false;
}
-void AsyncResultsMerger::scheduleKillCursors_inlock(OperationContext* txn) {
+void AsyncResultsMerger::scheduleKillCursors_inlock(OperationContext* opCtx) {
invariant(_lifecycleState == kKillStarted);
invariant(_killCursorsScheduledEvent.isValid());
@@ -613,7 +613,7 @@ void AsyncResultsMerger::scheduleKillCursors_inlock(OperationContext* txn) {
BSONObj cmdObj = KillCursorsRequest(_params->nsString, {*remote.cursorId}).toBSON();
executor::RemoteCommandRequest request(
- remote.getTargetHost(), _params->nsString.db().toString(), cmdObj, txn);
+ remote.getTargetHost(), _params->nsString.db().toString(), cmdObj, opCtx);
_executor->scheduleRemoteCommand(
request,
@@ -627,7 +627,7 @@ void AsyncResultsMerger::handleKillCursorsResponse(
// We just ignore any killCursors command responses.
}
-executor::TaskExecutor::EventHandle AsyncResultsMerger::kill(OperationContext* txn) {
+executor::TaskExecutor::EventHandle AsyncResultsMerger::kill(OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_killCursorsScheduledEvent.isValid()) {
invariant(_lifecycleState != kAlive);
@@ -653,7 +653,7 @@ executor::TaskExecutor::EventHandle AsyncResultsMerger::kill(OperationContext* t
// remotes now. Otherwise, we have to wait until all responses are back, and then we can kill
// the remote cursors.
if (!haveOutstandingBatchRequests_inlock()) {
- scheduleKillCursors_inlock(txn);
+ scheduleKillCursors_inlock(opCtx);
_lifecycleState = kKillComplete;
_executor->signalEvent(_killCursorsScheduledEvent);
}
diff --git a/src/mongo/s/query/async_results_merger.h b/src/mongo/s/query/async_results_merger.h
index 3252a22bf0e..e6766a4faa3 100644
--- a/src/mongo/s/query/async_results_merger.h
+++ b/src/mongo/s/query/async_results_merger.h
@@ -154,7 +154,7 @@ public:
* the caller should call nextEvent() to retry the request on the hosts that errored. If
* ready() is true, then either the error was not retriable or it has exhausted max retries.
*/
- StatusWith<executor::TaskExecutor::EventHandle> nextEvent(OperationContext* txn);
+ StatusWith<executor::TaskExecutor::EventHandle> nextEvent(OperationContext* opCtx);
/**
* Starts shutting down this ARM. Returns a handle to an event which is signaled when this
@@ -169,7 +169,7 @@ public:
*
* May be called multiple times (idempotent).
*/
- executor::TaskExecutor::EventHandle kill(OperationContext* txn);
+ executor::TaskExecutor::EventHandle kill(OperationContext* opCtx);
private:
/**
@@ -291,7 +291,7 @@ private:
*
* Returns success if the command to retrieve the next batch was scheduled successfully.
*/
- Status askForNextBatch_inlock(OperationContext* txn, size_t remoteIndex);
+ Status askForNextBatch_inlock(OperationContext* opCtx, size_t remoteIndex);
/**
* Checks whether or not the remote cursors are all exhausted.
@@ -322,7 +322,7 @@ private:
* buffered.
*/
void handleBatchResponse(const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData,
- OperationContext* txn,
+ OperationContext* opCtx,
size_t remoteIndex);
/**
@@ -342,7 +342,7 @@ private:
/**
* Schedules a killCursors command to be run on all remote hosts that have open cursors.
*/
- void scheduleKillCursors_inlock(OperationContext* txn);
+ void scheduleKillCursors_inlock(OperationContext* opCtx);
// Not owned here.
executor::TaskExecutor* _executor;
diff --git a/src/mongo/s/query/cluster_client_cursor.h b/src/mongo/s/query/cluster_client_cursor.h
index bea7bbba7aa..bd34689e62f 100644
--- a/src/mongo/s/query/cluster_client_cursor.h
+++ b/src/mongo/s/query/cluster_client_cursor.h
@@ -64,7 +64,7 @@ public:
*
* A non-ok status is returned in case of any error.
*/
- virtual StatusWith<ClusterQueryResult> next(OperationContext* txn) = 0;
+ virtual StatusWith<ClusterQueryResult> next(OperationContext* opCtx) = 0;
/**
* Must be called before destruction to abandon a not-yet-exhausted cursor. If next() has
@@ -72,7 +72,7 @@ public:
*
* May block waiting for responses from remote hosts.
*/
- virtual void kill(OperationContext* txn) = 0;
+ virtual void kill(OperationContext* opCtx) = 0;
/**
* Returns whether or not this cursor is tailing a capped collection on a shard.
diff --git a/src/mongo/s/query/cluster_client_cursor_impl.cpp b/src/mongo/s/query/cluster_client_cursor_impl.cpp
index 9f3157651b8..24ffc0b220a 100644
--- a/src/mongo/s/query/cluster_client_cursor_impl.cpp
+++ b/src/mongo/s/query/cluster_client_cursor_impl.cpp
@@ -41,13 +41,13 @@
namespace mongo {
-ClusterClientCursorGuard::ClusterClientCursorGuard(OperationContext* txn,
+ClusterClientCursorGuard::ClusterClientCursorGuard(OperationContext* opCtx,
std::unique_ptr<ClusterClientCursor> ccc)
- : _txn(txn), _ccc(std::move(ccc)) {}
+ : _opCtx(opCtx), _ccc(std::move(ccc)) {}
ClusterClientCursorGuard::~ClusterClientCursorGuard() {
if (_ccc && !_ccc->remotesExhausted()) {
- _ccc->kill(_txn);
+ _ccc->kill(_opCtx);
}
}
@@ -59,12 +59,12 @@ std::unique_ptr<ClusterClientCursor> ClusterClientCursorGuard::releaseCursor() {
return std::move(_ccc);
}
-ClusterClientCursorGuard ClusterClientCursorImpl::make(OperationContext* txn,
+ClusterClientCursorGuard ClusterClientCursorImpl::make(OperationContext* opCtx,
executor::TaskExecutor* executor,
ClusterClientCursorParams&& params) {
std::unique_ptr<ClusterClientCursor> cursor(
new ClusterClientCursorImpl(executor, std::move(params)));
- return ClusterClientCursorGuard(txn, std::move(cursor));
+ return ClusterClientCursorGuard(opCtx, std::move(cursor));
}
ClusterClientCursorImpl::ClusterClientCursorImpl(executor::TaskExecutor* executor,
@@ -75,7 +75,7 @@ ClusterClientCursorImpl::ClusterClientCursorImpl(std::unique_ptr<RouterStageMock
ClusterClientCursorParams&& params)
: _params(std::move(params)), _root(std::move(root)) {}
-StatusWith<ClusterQueryResult> ClusterClientCursorImpl::next(OperationContext* txn) {
+StatusWith<ClusterQueryResult> ClusterClientCursorImpl::next(OperationContext* opCtx) {
// First return stashed results, if there are any.
if (!_stash.empty()) {
auto front = std::move(_stash.front());
@@ -84,15 +84,15 @@ StatusWith<ClusterQueryResult> ClusterClientCursorImpl::next(OperationContext* t
return {front};
}
- auto next = _root->next(txn);
+ auto next = _root->next(opCtx);
if (next.isOK() && !next.getValue().isEOF()) {
++_numReturnedSoFar;
}
return next;
}
-void ClusterClientCursorImpl::kill(OperationContext* txn) {
- _root->kill(txn);
+void ClusterClientCursorImpl::kill(OperationContext* opCtx) {
+ _root->kill(opCtx);
}
bool ClusterClientCursorImpl::isTailable() const {
diff --git a/src/mongo/s/query/cluster_client_cursor_impl.h b/src/mongo/s/query/cluster_client_cursor_impl.h
index 929cf655849..de4e09d0950 100644
--- a/src/mongo/s/query/cluster_client_cursor_impl.h
+++ b/src/mongo/s/query/cluster_client_cursor_impl.h
@@ -50,7 +50,7 @@ class ClusterClientCursorGuard final {
MONGO_DISALLOW_COPYING(ClusterClientCursorGuard);
public:
- ClusterClientCursorGuard(OperationContext* txn, std::unique_ptr<ClusterClientCursor> ccc);
+ ClusterClientCursorGuard(OperationContext* opCtx, std::unique_ptr<ClusterClientCursor> ccc);
/**
* If a cursor is owned, safely destroys the cursor, cleaning up remote cursor state if
@@ -74,7 +74,7 @@ public:
std::unique_ptr<ClusterClientCursor> releaseCursor();
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
std::unique_ptr<ClusterClientCursor> _ccc;
};
@@ -85,7 +85,7 @@ public:
/**
* Constructs a CCC whose safe cleanup is ensured by an RAII object.
*/
- static ClusterClientCursorGuard make(OperationContext* txn,
+ static ClusterClientCursorGuard make(OperationContext* opCtx,
executor::TaskExecutor* executor,
ClusterClientCursorParams&& params);
@@ -95,9 +95,9 @@ public:
ClusterClientCursorImpl(std::unique_ptr<RouterStageMock> root,
ClusterClientCursorParams&& params);
- StatusWith<ClusterQueryResult> next(OperationContext* txn) final;
+ StatusWith<ClusterQueryResult> next(OperationContext* opCtx) final;
- void kill(OperationContext* txn) final;
+ void kill(OperationContext* opCtx) final;
bool isTailable() const final;
diff --git a/src/mongo/s/query/cluster_client_cursor_mock.cpp b/src/mongo/s/query/cluster_client_cursor_mock.cpp
index 533773742e7..28a4f2643f3 100644
--- a/src/mongo/s/query/cluster_client_cursor_mock.cpp
+++ b/src/mongo/s/query/cluster_client_cursor_mock.cpp
@@ -43,7 +43,7 @@ ClusterClientCursorMock::~ClusterClientCursorMock() {
invariant(_exhausted || _killed);
}
-StatusWith<ClusterQueryResult> ClusterClientCursorMock::next(OperationContext* txn) {
+StatusWith<ClusterQueryResult> ClusterClientCursorMock::next(OperationContext* opCtx) {
invariant(!_killed);
if (_resultsQueue.empty()) {
@@ -66,7 +66,7 @@ long long ClusterClientCursorMock::getNumReturnedSoFar() const {
return _numReturnedSoFar;
}
-void ClusterClientCursorMock::kill(OperationContext* txn) {
+void ClusterClientCursorMock::kill(OperationContext* opCtx) {
_killed = true;
if (_killCallback) {
_killCallback();
diff --git a/src/mongo/s/query/cluster_client_cursor_mock.h b/src/mongo/s/query/cluster_client_cursor_mock.h
index 7011911ce67..baea6660535 100644
--- a/src/mongo/s/query/cluster_client_cursor_mock.h
+++ b/src/mongo/s/query/cluster_client_cursor_mock.h
@@ -43,9 +43,9 @@ public:
~ClusterClientCursorMock();
- StatusWith<ClusterQueryResult> next(OperationContext* txn) final;
+ StatusWith<ClusterQueryResult> next(OperationContext* opCtx) final;
- void kill(OperationContext* txn) final;
+ void kill(OperationContext* opCtx) final;
bool isTailable() const final;
diff --git a/src/mongo/s/query/cluster_cursor_manager.cpp b/src/mongo/s/query/cluster_cursor_manager.cpp
index 2b4e68ac2cf..85d396490c6 100644
--- a/src/mongo/s/query/cluster_cursor_manager.cpp
+++ b/src/mongo/s/query/cluster_cursor_manager.cpp
@@ -110,9 +110,9 @@ ClusterCursorManager::PinnedCursor& ClusterCursorManager::PinnedCursor::operator
return *this;
}
-StatusWith<ClusterQueryResult> ClusterCursorManager::PinnedCursor::next(OperationContext* txn) {
+StatusWith<ClusterQueryResult> ClusterCursorManager::PinnedCursor::next(OperationContext* opCtx) {
invariant(_cursor);
- return _cursor->next(txn);
+ return _cursor->next(opCtx);
}
bool ClusterCursorManager::PinnedCursor::isTailable() const {
@@ -187,7 +187,7 @@ void ClusterCursorManager::shutdown() {
}
StatusWith<CursorId> ClusterCursorManager::registerCursor(
- OperationContext* txn,
+ OperationContext* opCtx,
std::unique_ptr<ClusterClientCursor> cursor,
const NamespaceString& nss,
CursorType cursorType,
@@ -199,7 +199,7 @@ StatusWith<CursorId> ClusterCursorManager::registerCursor(
if (_inShutdown) {
lk.unlock();
- cursor->kill(txn);
+ cursor->kill(opCtx);
return Status(ErrorCodes::ShutdownInProgress,
"Cannot register new cursors as we are in the process of shutting down");
}
@@ -246,7 +246,7 @@ StatusWith<CursorId> ClusterCursorManager::registerCursor(
}
StatusWith<ClusterCursorManager::PinnedCursor> ClusterCursorManager::checkOutCursor(
- const NamespaceString& nss, CursorId cursorId, OperationContext* txn) {
+ const NamespaceString& nss, CursorId cursorId, OperationContext* opCtx) {
// Read the clock out of the lock.
const auto now = _clockSource->now();
diff --git a/src/mongo/s/query/cluster_cursor_manager.h b/src/mongo/s/query/cluster_cursor_manager.h
index 6126ef0757e..ad320452b3b 100644
--- a/src/mongo/s/query/cluster_cursor_manager.h
+++ b/src/mongo/s/query/cluster_cursor_manager.h
@@ -154,7 +154,7 @@ public:
*
* Can block.
*/
- StatusWith<ClusterQueryResult> next(OperationContext* txn);
+ StatusWith<ClusterQueryResult> next(OperationContext* opCtx);
/**
* Returns whether or not the underlying cursor is tailing a capped collection. Cannot be
@@ -261,7 +261,7 @@ public:
*
* Does not block.
*/
- StatusWith<CursorId> registerCursor(OperationContext* txn,
+ StatusWith<CursorId> registerCursor(OperationContext* opCtx,
std::unique_ptr<ClusterClientCursor> cursor,
const NamespaceString& nss,
CursorType cursorType,
@@ -282,7 +282,7 @@ public:
*/
StatusWith<PinnedCursor> checkOutCursor(const NamespaceString& nss,
CursorId cursorId,
- OperationContext* txn);
+ OperationContext* opCtx);
/**
* Informs the manager that the given cursor should be killed. The cursor need not necessarily
diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp
index c8790a8cf83..d944954635a 100644
--- a/src/mongo/s/query/cluster_find.cpp
+++ b/src/mongo/s/query/cluster_find.cpp
@@ -149,14 +149,14 @@ StatusWith<std::unique_ptr<QueryRequest>> transformQueryForShards(const QueryReq
return std::move(newQR);
}
-StatusWith<CursorId> runQueryWithoutRetrying(OperationContext* txn,
+StatusWith<CursorId> runQueryWithoutRetrying(OperationContext* opCtx,
const CanonicalQuery& query,
const ReadPreferenceSetting& readPref,
ChunkManager* chunkManager,
std::shared_ptr<Shard> primary,
std::vector<BSONObj>* results,
BSONObj* viewDefinition) {
- auto shardRegistry = Grid::get(txn)->shardRegistry();
+ auto shardRegistry = Grid::get(opCtx)->shardRegistry();
// Get the set of shards on which we will run the query.
std::vector<std::shared_ptr<Shard>> shards;
@@ -166,13 +166,13 @@ StatusWith<CursorId> runQueryWithoutRetrying(OperationContext* txn,
invariant(chunkManager);
std::set<ShardId> shardIds;
- chunkManager->getShardIdsForQuery(txn,
+ chunkManager->getShardIdsForQuery(opCtx,
query.getQueryRequest().getFilter(),
query.getQueryRequest().getCollation(),
&shardIds);
for (auto id : shardIds) {
- auto shardStatus = shardRegistry->getShard(txn, id);
+ auto shardStatus = shardRegistry->getShard(opCtx, id);
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
@@ -231,12 +231,12 @@ StatusWith<CursorId> runQueryWithoutRetrying(OperationContext* txn,
}
auto ccc = ClusterClientCursorImpl::make(
- txn, Grid::get(txn)->getExecutorPool()->getArbitraryExecutor(), std::move(params));
+ opCtx, Grid::get(opCtx)->getExecutorPool()->getArbitraryExecutor(), std::move(params));
auto cursorState = ClusterCursorManager::CursorState::NotExhausted;
int bytesBuffered = 0;
while (!FindCommon::enoughForFirstBatch(query.getQueryRequest(), results->size())) {
- auto next = ccc->next(txn);
+ auto next = ccc->next(opCtx);
if (!next.isOK()) {
if (viewDefinition &&
@@ -289,21 +289,21 @@ StatusWith<CursorId> runQueryWithoutRetrying(OperationContext* txn,
}
// Register the cursor with the cursor manager.
- auto cursorManager = Grid::get(txn)->getCursorManager();
+ auto cursorManager = Grid::get(opCtx)->getCursorManager();
const auto cursorType = chunkManager ? ClusterCursorManager::CursorType::NamespaceSharded
: ClusterCursorManager::CursorType::NamespaceNotSharded;
const auto cursorLifetime = query.getQueryRequest().isNoCursorTimeout()
? ClusterCursorManager::CursorLifetime::Immortal
: ClusterCursorManager::CursorLifetime::Mortal;
return cursorManager->registerCursor(
- txn, ccc.releaseCursor(), query.nss(), cursorType, cursorLifetime);
+ opCtx, ccc.releaseCursor(), query.nss(), cursorType, cursorLifetime);
}
} // namespace
const size_t ClusterFind::kMaxStaleConfigRetries = 10;
-StatusWith<CursorId> ClusterFind::runQuery(OperationContext* txn,
+StatusWith<CursorId> ClusterFind::runQuery(OperationContext* opCtx,
const CanonicalQuery& query,
const ReadPreferenceSetting& readPref,
std::vector<BSONObj>* results,
@@ -322,7 +322,7 @@ StatusWith<CursorId> ClusterFind::runQuery(OperationContext* txn,
// Re-target and re-send the initial find command to the shards until we have established the
// shard version.
for (size_t retries = 1; retries <= kMaxStaleConfigRetries; ++retries) {
- auto scopedCMStatus = ScopedChunkManager::get(txn, query.nss());
+ auto scopedCMStatus = ScopedChunkManager::get(opCtx, query.nss());
if (scopedCMStatus == ErrorCodes::NamespaceNotFound) {
// If the database doesn't exist, we successfully return an empty result set without
// creating a cursor.
@@ -333,8 +333,13 @@ StatusWith<CursorId> ClusterFind::runQuery(OperationContext* txn,
const auto& scopedCM = scopedCMStatus.getValue();
- auto cursorId = runQueryWithoutRetrying(
- txn, query, readPref, scopedCM.cm().get(), scopedCM.primary(), results, viewDefinition);
+ auto cursorId = runQueryWithoutRetrying(opCtx,
+ query,
+ readPref,
+ scopedCM.cm().get(),
+ scopedCM.primary(),
+ results,
+ viewDefinition);
if (cursorId.isOK()) {
return cursorId;
}
@@ -353,9 +358,9 @@ StatusWith<CursorId> ClusterFind::runQuery(OperationContext* txn,
<< redact(status);
if (status == ErrorCodes::StaleEpoch) {
- Grid::get(txn)->catalogCache()->invalidate(query.nss().db().toString());
+ Grid::get(opCtx)->catalogCache()->invalidate(query.nss().db().toString());
} else {
- scopedCM.db()->getChunkManagerIfExists(txn, query.nss().ns(), true);
+ scopedCM.db()->getChunkManagerIfExists(opCtx, query.nss().ns(), true);
}
}
@@ -364,11 +369,11 @@ StatusWith<CursorId> ClusterFind::runQuery(OperationContext* txn,
<< " times without successfully establishing shard version."};
}
-StatusWith<CursorResponse> ClusterFind::runGetMore(OperationContext* txn,
+StatusWith<CursorResponse> ClusterFind::runGetMore(OperationContext* opCtx,
const GetMoreRequest& request) {
- auto cursorManager = Grid::get(txn)->getCursorManager();
+ auto cursorManager = Grid::get(opCtx)->getCursorManager();
- auto pinnedCursor = cursorManager->checkOutCursor(request.nss, request.cursorid, txn);
+ auto pinnedCursor = cursorManager->checkOutCursor(request.nss, request.cursorid, opCtx);
if (!pinnedCursor.isOK()) {
return pinnedCursor.getStatus();
}
@@ -391,7 +396,7 @@ StatusWith<CursorResponse> ClusterFind::runGetMore(OperationContext* txn,
long long startingFrom = pinnedCursor.getValue().getNumReturnedSoFar();
auto cursorState = ClusterCursorManager::CursorState::NotExhausted;
while (!FindCommon::enoughForGetMore(batchSize, batch.size())) {
- auto next = pinnedCursor.getValue().next(txn);
+ auto next = pinnedCursor.getValue().next(opCtx);
if (!next.isOK()) {
return next.getStatus();
}
diff --git a/src/mongo/s/query/cluster_find.h b/src/mongo/s/query/cluster_find.h
index 22d7ad89b04..5a011d27958 100644
--- a/src/mongo/s/query/cluster_find.h
+++ b/src/mongo/s/query/cluster_find.h
@@ -66,7 +66,7 @@ public:
* If a CommandOnShardedViewNotSupportedOnMongod error is returned, then 'viewDefinition', if
* not null, will contain a view definition.
*/
- static StatusWith<CursorId> runQuery(OperationContext* txn,
+ static StatusWith<CursorId> runQuery(OperationContext* opCtx,
const CanonicalQuery& query,
const ReadPreferenceSetting& readPref,
std::vector<BSONObj>* results,
@@ -75,7 +75,7 @@ public:
/**
* Executes the getMore request 'request', and on success returns a CursorResponse.
*/
- static StatusWith<CursorResponse> runGetMore(OperationContext* txn,
+ static StatusWith<CursorResponse> runGetMore(OperationContext* opCtx,
const GetMoreRequest& request);
/**
diff --git a/src/mongo/s/query/router_exec_stage.h b/src/mongo/s/query/router_exec_stage.h
index 5fcb6053e58..f6128a53e43 100644
--- a/src/mongo/s/query/router_exec_stage.h
+++ b/src/mongo/s/query/router_exec_stage.h
@@ -66,13 +66,13 @@ public:
* holding on to a subset of the returned results and need to minimize memory usage, call copy()
* on the BSONObjs.
*/
- virtual StatusWith<ClusterQueryResult> next(OperationContext* txn) = 0;
+ virtual StatusWith<ClusterQueryResult> next(OperationContext* opCtx) = 0;
/**
* Must be called before destruction to abandon a not-yet-exhausted plan. May block waiting for
* responses from remote hosts.
*/
- virtual void kill(OperationContext* txn) = 0;
+ virtual void kill(OperationContext* opCtx) = 0;
/**
* Returns whether or not all the remote cursors are exhausted.
diff --git a/src/mongo/s/query/router_stage_limit.cpp b/src/mongo/s/query/router_stage_limit.cpp
index 4a1a428a533..ea90251eef6 100644
--- a/src/mongo/s/query/router_stage_limit.cpp
+++ b/src/mongo/s/query/router_stage_limit.cpp
@@ -39,12 +39,12 @@ RouterStageLimit::RouterStageLimit(std::unique_ptr<RouterExecStage> child, long
invariant(limit > 0);
}
-StatusWith<ClusterQueryResult> RouterStageLimit::next(OperationContext* txn) {
+StatusWith<ClusterQueryResult> RouterStageLimit::next(OperationContext* opCtx) {
if (_returnedSoFar >= _limit) {
return {ClusterQueryResult()};
}
- auto childResult = getChildStage()->next(txn);
+ auto childResult = getChildStage()->next(opCtx);
if (!childResult.isOK()) {
return childResult;
}
@@ -55,8 +55,8 @@ StatusWith<ClusterQueryResult> RouterStageLimit::next(OperationContext* txn) {
return childResult;
}
-void RouterStageLimit::kill(OperationContext* txn) {
- getChildStage()->kill(txn);
+void RouterStageLimit::kill(OperationContext* opCtx) {
+ getChildStage()->kill(opCtx);
}
bool RouterStageLimit::remotesExhausted() {
diff --git a/src/mongo/s/query/router_stage_limit.h b/src/mongo/s/query/router_stage_limit.h
index 29fb85dd458..42223902cc1 100644
--- a/src/mongo/s/query/router_stage_limit.h
+++ b/src/mongo/s/query/router_stage_limit.h
@@ -39,9 +39,9 @@ class RouterStageLimit final : public RouterExecStage {
public:
RouterStageLimit(std::unique_ptr<RouterExecStage> child, long long limit);
- StatusWith<ClusterQueryResult> next(OperationContext* txn) final;
+ StatusWith<ClusterQueryResult> next(OperationContext* opCtx) final;
- void kill(OperationContext* txn) final;
+ void kill(OperationContext* opCtx) final;
bool remotesExhausted() final;
diff --git a/src/mongo/s/query/router_stage_merge.cpp b/src/mongo/s/query/router_stage_merge.cpp
index e66aaf91fc4..90a80e7161b 100644
--- a/src/mongo/s/query/router_stage_merge.cpp
+++ b/src/mongo/s/query/router_stage_merge.cpp
@@ -40,9 +40,9 @@ RouterStageMerge::RouterStageMerge(executor::TaskExecutor* executor,
ClusterClientCursorParams* params)
: _executor(executor), _arm(executor, params) {}
-StatusWith<ClusterQueryResult> RouterStageMerge::next(OperationContext* txn) {
+StatusWith<ClusterQueryResult> RouterStageMerge::next(OperationContext* opCtx) {
while (!_arm.ready()) {
- auto nextEventStatus = _arm.nextEvent(txn);
+ auto nextEventStatus = _arm.nextEvent(opCtx);
if (!nextEventStatus.isOK()) {
return nextEventStatus.getStatus();
}
@@ -55,8 +55,8 @@ StatusWith<ClusterQueryResult> RouterStageMerge::next(OperationContext* txn) {
return _arm.nextReady();
}
-void RouterStageMerge::kill(OperationContext* txn) {
- auto killEvent = _arm.kill(txn);
+void RouterStageMerge::kill(OperationContext* opCtx) {
+ auto killEvent = _arm.kill(opCtx);
if (!killEvent) {
// Mongos is shutting down.
return;
diff --git a/src/mongo/s/query/router_stage_merge.h b/src/mongo/s/query/router_stage_merge.h
index 58a8061355e..428a405b401 100644
--- a/src/mongo/s/query/router_stage_merge.h
+++ b/src/mongo/s/query/router_stage_merge.h
@@ -45,9 +45,9 @@ class RouterStageMerge final : public RouterExecStage {
public:
RouterStageMerge(executor::TaskExecutor* executor, ClusterClientCursorParams* params);
- StatusWith<ClusterQueryResult> next(OperationContext* txn) final;
+ StatusWith<ClusterQueryResult> next(OperationContext* opCtx) final;
- void kill(OperationContext* txn) final;
+ void kill(OperationContext* opCtx) final;
bool remotesExhausted() final;
diff --git a/src/mongo/s/query/router_stage_mock.cpp b/src/mongo/s/query/router_stage_mock.cpp
index c348018fe6f..e134340713a 100644
--- a/src/mongo/s/query/router_stage_mock.cpp
+++ b/src/mongo/s/query/router_stage_mock.cpp
@@ -50,7 +50,7 @@ void RouterStageMock::markRemotesExhausted() {
_remotesExhausted = true;
}
-StatusWith<ClusterQueryResult> RouterStageMock::next(OperationContext* txn) {
+StatusWith<ClusterQueryResult> RouterStageMock::next(OperationContext* opCtx) {
if (_resultsQueue.empty()) {
return {ClusterQueryResult()};
}
@@ -60,7 +60,7 @@ StatusWith<ClusterQueryResult> RouterStageMock::next(OperationContext* txn) {
return out;
}
-void RouterStageMock::kill(OperationContext* txn) {
+void RouterStageMock::kill(OperationContext* opCtx) {
// No child to kill.
}
diff --git a/src/mongo/s/query/router_stage_mock.h b/src/mongo/s/query/router_stage_mock.h
index dce077d8122..7cba32a81f6 100644
--- a/src/mongo/s/query/router_stage_mock.h
+++ b/src/mongo/s/query/router_stage_mock.h
@@ -44,9 +44,9 @@ class RouterStageMock final : public RouterExecStage {
public:
~RouterStageMock() final {}
- StatusWith<ClusterQueryResult> next(OperationContext* txn) final;
+ StatusWith<ClusterQueryResult> next(OperationContext* opCtx) final;
- void kill(OperationContext* txn) final;
+ void kill(OperationContext* opCtx) final;
bool remotesExhausted() final;
diff --git a/src/mongo/s/query/router_stage_remove_sortkey.cpp b/src/mongo/s/query/router_stage_remove_sortkey.cpp
index 9c58e489b13..9cb1e4d26c9 100644
--- a/src/mongo/s/query/router_stage_remove_sortkey.cpp
+++ b/src/mongo/s/query/router_stage_remove_sortkey.cpp
@@ -41,8 +41,8 @@ namespace mongo {
RouterStageRemoveSortKey::RouterStageRemoveSortKey(std::unique_ptr<RouterExecStage> child)
: RouterExecStage(std::move(child)) {}
-StatusWith<ClusterQueryResult> RouterStageRemoveSortKey::next(OperationContext* txn) {
- auto childResult = getChildStage()->next(txn);
+StatusWith<ClusterQueryResult> RouterStageRemoveSortKey::next(OperationContext* opCtx) {
+ auto childResult = getChildStage()->next(opCtx);
if (!childResult.isOK() || !childResult.getValue().getResult()) {
return childResult;
}
@@ -59,8 +59,8 @@ StatusWith<ClusterQueryResult> RouterStageRemoveSortKey::next(OperationContext*
return {builder.obj()};
}
-void RouterStageRemoveSortKey::kill(OperationContext* txn) {
- getChildStage()->kill(txn);
+void RouterStageRemoveSortKey::kill(OperationContext* opCtx) {
+ getChildStage()->kill(opCtx);
}
bool RouterStageRemoveSortKey::remotesExhausted() {
diff --git a/src/mongo/s/query/router_stage_remove_sortkey.h b/src/mongo/s/query/router_stage_remove_sortkey.h
index 291cf01a803..e3599a3e9b0 100644
--- a/src/mongo/s/query/router_stage_remove_sortkey.h
+++ b/src/mongo/s/query/router_stage_remove_sortkey.h
@@ -41,9 +41,9 @@ class RouterStageRemoveSortKey final : public RouterExecStage {
public:
RouterStageRemoveSortKey(std::unique_ptr<RouterExecStage> child);
- StatusWith<ClusterQueryResult> next(OperationContext* txn) final;
+ StatusWith<ClusterQueryResult> next(OperationContext* opCtx) final;
- void kill(OperationContext* txn) final;
+ void kill(OperationContext* opCtx) final;
bool remotesExhausted() final;
diff --git a/src/mongo/s/query/router_stage_skip.cpp b/src/mongo/s/query/router_stage_skip.cpp
index a6bec5c8733..6763ca5808b 100644
--- a/src/mongo/s/query/router_stage_skip.cpp
+++ b/src/mongo/s/query/router_stage_skip.cpp
@@ -39,9 +39,9 @@ RouterStageSkip::RouterStageSkip(std::unique_ptr<RouterExecStage> child, long lo
invariant(skip > 0);
}
-StatusWith<ClusterQueryResult> RouterStageSkip::next(OperationContext* txn) {
+StatusWith<ClusterQueryResult> RouterStageSkip::next(OperationContext* opCtx) {
while (_skippedSoFar < _skip) {
- auto next = getChildStage()->next(txn);
+ auto next = getChildStage()->next(opCtx);
if (!next.isOK()) {
return next;
}
@@ -53,11 +53,11 @@ StatusWith<ClusterQueryResult> RouterStageSkip::next(OperationContext* txn) {
++_skippedSoFar;
}
- return getChildStage()->next(txn);
+ return getChildStage()->next(opCtx);
}
-void RouterStageSkip::kill(OperationContext* txn) {
- getChildStage()->kill(txn);
+void RouterStageSkip::kill(OperationContext* opCtx) {
+ getChildStage()->kill(opCtx);
}
bool RouterStageSkip::remotesExhausted() {
diff --git a/src/mongo/s/query/router_stage_skip.h b/src/mongo/s/query/router_stage_skip.h
index c949271f79e..773220d4fe6 100644
--- a/src/mongo/s/query/router_stage_skip.h
+++ b/src/mongo/s/query/router_stage_skip.h
@@ -39,9 +39,9 @@ class RouterStageSkip final : public RouterExecStage {
public:
RouterStageSkip(std::unique_ptr<RouterExecStage> child, long long skip);
- StatusWith<ClusterQueryResult> next(OperationContext* txn) final;
+ StatusWith<ClusterQueryResult> next(OperationContext* opCtx) final;
- void kill(OperationContext* txn) final;
+ void kill(OperationContext* opCtx) final;
bool remotesExhausted() final;
diff --git a/src/mongo/s/query/store_possible_cursor.cpp b/src/mongo/s/query/store_possible_cursor.cpp
index 1e3b7d03306..8647871b6a7 100644
--- a/src/mongo/s/query/store_possible_cursor.cpp
+++ b/src/mongo/s/query/store_possible_cursor.cpp
@@ -39,7 +39,7 @@
namespace mongo {
-StatusWith<BSONObj> storePossibleCursor(OperationContext* txn,
+StatusWith<BSONObj> storePossibleCursor(OperationContext* opCtx,
const HostAndPort& server,
const BSONObj& cmdResult,
const NamespaceString& requestedNss,
@@ -62,10 +62,10 @@ StatusWith<BSONObj> storePossibleCursor(OperationContext* txn,
params.remotes.emplace_back(server, incomingCursorResponse.getValue().getCursorId());
- auto ccc = ClusterClientCursorImpl::make(txn, executor, std::move(params));
+ auto ccc = ClusterClientCursorImpl::make(opCtx, executor, std::move(params));
auto clusterCursorId =
- cursorManager->registerCursor(txn,
+ cursorManager->registerCursor(opCtx,
ccc.releaseCursor(),
requestedNss,
ClusterCursorManager::CursorType::NamespaceNotSharded,
diff --git a/src/mongo/s/query/store_possible_cursor.h b/src/mongo/s/query/store_possible_cursor.h
index f06c959b41c..03d61ac4d33 100644
--- a/src/mongo/s/query/store_possible_cursor.h
+++ b/src/mongo/s/query/store_possible_cursor.h
@@ -57,7 +57,7 @@ class TaskExecutor;
* BSONObj response document describing the newly-created cursor, which is suitable for returning to
* the client.
*/
-StatusWith<BSONObj> storePossibleCursor(OperationContext* txn,
+StatusWith<BSONObj> storePossibleCursor(OperationContext* opCtx,
const HostAndPort& server,
const BSONObj& cmdResult,
const NamespaceString& requestedNss,
diff --git a/src/mongo/s/s_sharding_server_status.cpp b/src/mongo/s/s_sharding_server_status.cpp
index 5be289d5d18..3b91159b110 100644
--- a/src/mongo/s/s_sharding_server_status.cpp
+++ b/src/mongo/s/s_sharding_server_status.cpp
@@ -45,16 +45,16 @@ public:
return true;
}
- BSONObj generateSection(OperationContext* txn,
+ BSONObj generateSection(OperationContext* opCtx,
const BSONElement& configElement) const override {
- auto shardRegistry = Grid::get(txn)->shardRegistry();
+ auto shardRegistry = Grid::get(opCtx)->shardRegistry();
invariant(shardRegistry);
BSONObjBuilder result;
result.append("configsvrConnectionString",
shardRegistry->getConfigServerConnectionString().toString());
- Grid::get(txn)->configOpTime().append(&result, "lastSeenConfigServerOpTime");
+ Grid::get(opCtx)->configOpTime().append(&result, "lastSeenConfigServerOpTime");
return result.obj();
}
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
index 2fab9570572..41ad5b8ce7b 100644
--- a/src/mongo/s/server.cpp
+++ b/src/mongo/s/server.cpp
@@ -136,23 +136,23 @@ static void cleanupTask() {
Client::initThreadIfNotAlready();
Client& client = cc();
ServiceContext::UniqueOperationContext uniqueTxn;
- OperationContext* txn = client.getOperationContext();
- if (!txn) {
+ OperationContext* opCtx = client.getOperationContext();
+ if (!opCtx) {
uniqueTxn = client.makeOperationContext();
- txn = uniqueTxn.get();
+ opCtx = uniqueTxn.get();
}
if (serviceContext)
serviceContext->setKillAllOperations();
- if (auto cursorManager = Grid::get(txn)->getCursorManager()) {
+ if (auto cursorManager = Grid::get(opCtx)->getCursorManager()) {
cursorManager->shutdown();
}
- if (auto pool = Grid::get(txn)->getExecutorPool()) {
+ if (auto pool = Grid::get(opCtx)->getExecutorPool()) {
pool->shutdownAndJoin();
}
- if (auto catalog = Grid::get(txn)->catalogClient(txn)) {
- catalog->shutDown(txn);
+ if (auto catalog = Grid::get(opCtx)->catalogClient(opCtx)) {
+ catalog->shutDown(opCtx);
}
}
@@ -173,7 +173,7 @@ static BSONObj buildErrReply(const DBException& ex) {
using namespace mongo;
-static Status initializeSharding(OperationContext* txn) {
+static Status initializeSharding(OperationContext* opCtx) {
auto targeterFactory = stdx::make_unique<RemoteCommandTargeterFactoryImpl>();
auto targeterFactoryPtr = targeterFactory.get();
@@ -198,9 +198,9 @@ static Status initializeSharding(OperationContext* txn) {
stdx::make_unique<ShardFactory>(std::move(buildersMap), std::move(targeterFactory));
Status status = initializeGlobalShardingState(
- txn,
+ opCtx,
mongosGlobalParams.configdbs,
- generateDistLockProcessId(txn),
+ generateDistLockProcessId(opCtx),
std::move(shardFactory),
[]() {
auto hookList = stdx::make_unique<rpc::EgressMetadataHookList>();
@@ -216,7 +216,7 @@ static Status initializeSharding(OperationContext* txn) {
return status;
}
- status = reloadShardRegistryUntilSuccess(txn);
+ status = reloadShardRegistryUntilSuccess(opCtx);
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/s/service_entry_point_mongos.cpp b/src/mongo/s/service_entry_point_mongos.cpp
index 67ac08e9354..623af0e3602 100644
--- a/src/mongo/s/service_entry_point_mongos.cpp
+++ b/src/mongo/s/service_entry_point_mongos.cpp
@@ -100,7 +100,7 @@ void ServiceEntryPointMongos::_sessionLoop(const transport::SessionHandle& sessi
uassertStatusOK(status);
}
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
const int32_t msgId = message.header().getId();
@@ -114,8 +114,8 @@ void ServiceEntryPointMongos::_sessionLoop(const transport::SessionHandle& sessi
// Start a new LastError session. Any exceptions thrown from here onwards will be returned
// to the caller (if the type of the message permits it).
- ClusterLastErrorInfo::get(txn->getClient()).newRequest();
- LastError::get(txn->getClient()).startRequest();
+ ClusterLastErrorInfo::get(opCtx->getClient()).newRequest();
+ LastError::get(opCtx->getClient()).startRequest();
DbMessage dbm(message);
@@ -135,7 +135,7 @@ void ServiceEntryPointMongos::_sessionLoop(const transport::SessionHandle& sessi
nss.db() != NamespaceString::kLocalDb);
}
- AuthorizationSession::get(txn->getClient())->startRequest(txn.get());
+ AuthorizationSession::get(opCtx->getClient())->startRequest(opCtx.get());
LOG(3) << "Request::process begin ns: " << nss << " msg id: " << msgId
<< " op: " << networkOpToString(op);
@@ -143,19 +143,19 @@ void ServiceEntryPointMongos::_sessionLoop(const transport::SessionHandle& sessi
switch (op) {
case dbQuery:
if (nss.isCommand() || nss.isSpecialCommand()) {
- Strategy::clientCommandOp(txn.get(), nss, &dbm);
+ Strategy::clientCommandOp(opCtx.get(), nss, &dbm);
} else {
- Strategy::queryOp(txn.get(), nss, &dbm);
+ Strategy::queryOp(opCtx.get(), nss, &dbm);
}
break;
case dbGetMore:
- Strategy::getMore(txn.get(), nss, &dbm);
+ Strategy::getMore(opCtx.get(), nss, &dbm);
break;
case dbKillCursors:
- Strategy::killCursors(txn.get(), &dbm);
+ Strategy::killCursors(opCtx.get(), &dbm);
break;
default:
- Strategy::writeOp(txn.get(), &dbm);
+ Strategy::writeOp(opCtx.get(), &dbm);
break;
}
@@ -172,7 +172,7 @@ void ServiceEntryPointMongos::_sessionLoop(const transport::SessionHandle& sessi
}
// We *always* populate the last error for now
- LastError::get(txn->getClient()).setLastError(ex.getCode(), ex.what());
+ LastError::get(opCtx->getClient()).setLastError(ex.getCode(), ex.what());
}
if ((counter++ & 0xf) == 0) {
diff --git a/src/mongo/s/shard_key_pattern.cpp b/src/mongo/s/shard_key_pattern.cpp
index 1a8e77658e1..ef0009bd8d6 100644
--- a/src/mongo/s/shard_key_pattern.cpp
+++ b/src/mongo/s/shard_key_pattern.cpp
@@ -265,7 +265,7 @@ static BSONElement findEqualityElement(const EqualityMatches& equalities, const
return extractKeyElementFromMatchable(matchable, suffixStr);
}
-StatusWith<BSONObj> ShardKeyPattern::extractShardKeyFromQuery(OperationContext* txn,
+StatusWith<BSONObj> ShardKeyPattern::extractShardKeyFromQuery(OperationContext* opCtx,
const BSONObj& basicQuery) const {
if (!isValid())
return StatusWith<BSONObj>(BSONObj());
@@ -273,7 +273,8 @@ StatusWith<BSONObj> ShardKeyPattern::extractShardKeyFromQuery(OperationContext*
auto qr = stdx::make_unique<QueryRequest>(NamespaceString(""));
qr->setFilter(basicQuery);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), ExtensionsCallbackNoop());
+ auto statusWithCQ =
+ CanonicalQuery::canonicalize(opCtx, std::move(qr), ExtensionsCallbackNoop());
if (!statusWithCQ.isOK()) {
return StatusWith<BSONObj>(statusWithCQ.getStatus());
}
diff --git a/src/mongo/s/shard_key_pattern.h b/src/mongo/s/shard_key_pattern.h
index 67c8eeb4a2f..ca4e01a4a5a 100644
--- a/src/mongo/s/shard_key_pattern.h
+++ b/src/mongo/s/shard_key_pattern.h
@@ -164,7 +164,7 @@ public:
* { a : { b : { $eq : "hi" } } } --> returns {} because the query language treats this as
* a : { $eq : { b : ... } }
*/
- StatusWith<BSONObj> extractShardKeyFromQuery(OperationContext* txn,
+ StatusWith<BSONObj> extractShardKeyFromQuery(OperationContext* opCtx,
const BSONObj& basicQuery) const;
BSONObj extractShardKeyFromQuery(const CanonicalQuery& query) const;
diff --git a/src/mongo/s/shard_key_pattern_test.cpp b/src/mongo/s/shard_key_pattern_test.cpp
index e3b31a7a5d2..2b4ddb22fae 100644
--- a/src/mongo/s/shard_key_pattern_test.cpp
+++ b/src/mongo/s/shard_key_pattern_test.cpp
@@ -261,9 +261,9 @@ TEST(ShardKeyPattern, ExtractDocShardKeyHashed) {
static BSONObj queryKey(const ShardKeyPattern& pattern, const BSONObj& query) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
- StatusWith<BSONObj> status = pattern.extractShardKeyFromQuery(txn.get(), query);
+ StatusWith<BSONObj> status = pattern.extractShardKeyFromQuery(opCtx.get(), query);
if (!status.isOK())
return BSONObj();
return status.getValue();
diff --git a/src/mongo/s/shard_util.cpp b/src/mongo/s/shard_util.cpp
index b38fd9657e7..8adf605d4a3 100644
--- a/src/mongo/s/shard_util.cpp
+++ b/src/mongo/s/shard_util.cpp
@@ -53,14 +53,14 @@ const char kShouldMigrate[] = "shouldMigrate";
} // namespace
-StatusWith<long long> retrieveTotalShardSize(OperationContext* txn, const ShardId& shardId) {
- auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+StatusWith<long long> retrieveTotalShardSize(OperationContext* opCtx, const ShardId& shardId) {
+ auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
auto listDatabasesStatus = shardStatus.getValue()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
"admin",
BSON("listDatabases" << 1),
@@ -80,7 +80,7 @@ StatusWith<long long> retrieveTotalShardSize(OperationContext* txn, const ShardI
return totalSizeElem.numberLong();
}
-StatusWith<std::vector<BSONObj>> selectChunkSplitPoints(OperationContext* txn,
+StatusWith<std::vector<BSONObj>> selectChunkSplitPoints(OperationContext* opCtx,
const ShardId& shardId,
const NamespaceString& nss,
const ShardKeyPattern& shardKeyPattern,
@@ -96,13 +96,13 @@ StatusWith<std::vector<BSONObj>> selectChunkSplitPoints(OperationContext* txn,
cmd.append("maxChunkObjects", *maxObjs);
}
- auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
auto cmdStatus = shardStatus.getValue()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
"admin",
cmd.obj(),
@@ -127,7 +127,7 @@ StatusWith<std::vector<BSONObj>> selectChunkSplitPoints(OperationContext* txn,
}
StatusWith<boost::optional<ChunkRange>> splitChunkAtMultiplePoints(
- OperationContext* txn,
+ OperationContext* opCtx,
const ShardId& shardId,
const NamespaceString& nss,
const ShardKeyPattern& shardKeyPattern,
@@ -176,12 +176,12 @@ StatusWith<boost::optional<ChunkRange>> splitChunkAtMultiplePoints(
Status status{ErrorCodes::InternalError, "Uninitialized value"};
BSONObj cmdResponse;
- auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
status = shardStatus.getStatus();
} else {
auto cmdStatus = shardStatus.getValue()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"admin",
cmdObj,
diff --git a/src/mongo/s/shard_util.h b/src/mongo/s/shard_util.h
index 79fec95e897..4c3eaabac0a 100644
--- a/src/mongo/s/shard_util.h
+++ b/src/mongo/s/shard_util.h
@@ -58,7 +58,7 @@ namespace shardutil {
* ShardNotFound if shard by that id is not available on the registry
* NoSuchKey if the total shard size could not be retrieved
*/
-StatusWith<long long> retrieveTotalShardSize(OperationContext* txn, const ShardId& shardId);
+StatusWith<long long> retrieveTotalShardSize(OperationContext* opCtx, const ShardId& shardId);
/**
* Ask the specified shard to figure out the split points for a given chunk.
@@ -71,7 +71,7 @@ StatusWith<long long> retrieveTotalShardSize(OperationContext* txn, const ShardI
* maxObjs Limits the number of objects in each chunk. Zero means max, unspecified means use the
* server default.
*/
-StatusWith<std::vector<BSONObj>> selectChunkSplitPoints(OperationContext* txn,
+StatusWith<std::vector<BSONObj>> selectChunkSplitPoints(OperationContext* opCtx,
const ShardId& shardId,
const NamespaceString& nss,
const ShardKeyPattern& shardKeyPattern,
@@ -92,7 +92,7 @@ StatusWith<std::vector<BSONObj>> selectChunkSplitPoints(OperationContext* txn,
* splitPoints The set of points at which the chunk should be split.
*/
StatusWith<boost::optional<ChunkRange>> splitChunkAtMultiplePoints(
- OperationContext* txn,
+ OperationContext* opCtx,
const ShardId& shardId,
const NamespaceString& nss,
const ShardKeyPattern& shardKeyPattern,
diff --git a/src/mongo/s/sharding_egress_metadata_hook.cpp b/src/mongo/s/sharding_egress_metadata_hook.cpp
index f40f80f2e50..af263360a0d 100644
--- a/src/mongo/s/sharding_egress_metadata_hook.cpp
+++ b/src/mongo/s/sharding_egress_metadata_hook.cpp
@@ -51,13 +51,13 @@ namespace rpc {
using std::shared_ptr;
Status ShardingEgressMetadataHook::writeRequestMetadata(bool shardedConnection,
- OperationContext* txn,
+ OperationContext* opCtx,
const StringData target,
BSONObjBuilder* metadataBob) {
try {
- audit::writeImpersonatedUsersToMetadata(txn, metadataBob);
+ audit::writeImpersonatedUsersToMetadata(opCtx, metadataBob);
- ClientMetadataIsMasterState::writeToMetadata(txn, metadataBob);
+ ClientMetadataIsMasterState::writeToMetadata(opCtx, metadataBob);
if (!shardedConnection) {
return Status::OK();
}
@@ -68,10 +68,10 @@ Status ShardingEgressMetadataHook::writeRequestMetadata(bool shardedConnection,
}
}
-Status ShardingEgressMetadataHook::writeRequestMetadata(OperationContext* txn,
+Status ShardingEgressMetadataHook::writeRequestMetadata(OperationContext* opCtx,
const HostAndPort& target,
BSONObjBuilder* metadataBob) {
- return writeRequestMetadata(true, txn, target.toString(), metadataBob);
+ return writeRequestMetadata(true, opCtx, target.toString(), metadataBob);
}
Status ShardingEgressMetadataHook::readReplyMetadata(const StringData replySource,
diff --git a/src/mongo/s/sharding_egress_metadata_hook.h b/src/mongo/s/sharding_egress_metadata_hook.h
index df105c813bf..1c8849dcea3 100644
--- a/src/mongo/s/sharding_egress_metadata_hook.h
+++ b/src/mongo/s/sharding_egress_metadata_hook.h
@@ -46,7 +46,7 @@ public:
virtual ~ShardingEgressMetadataHook() = default;
Status readReplyMetadata(const HostAndPort& replySource, const BSONObj& metadataObj) override;
- Status writeRequestMetadata(OperationContext* txn,
+ Status writeRequestMetadata(OperationContext* opCtx,
const HostAndPort& target,
BSONObjBuilder* metadataBob) override;
@@ -57,7 +57,7 @@ public:
// contact.
Status readReplyMetadata(const StringData replySource, const BSONObj& metadataObj);
Status writeRequestMetadata(bool shardedConnection,
- OperationContext* txn,
+ OperationContext* opCtx,
const StringData target,
BSONObjBuilder* metadataBob);
diff --git a/src/mongo/s/sharding_initialization.cpp b/src/mongo/s/sharding_initialization.cpp
index 1ffcbda99dd..db41e736fc6 100644
--- a/src/mongo/s/sharding_initialization.cpp
+++ b/src/mongo/s/sharding_initialization.cpp
@@ -145,17 +145,17 @@ std::unique_ptr<TaskExecutorPool> makeTaskExecutorPool(
const StringData kDistLockProcessIdForConfigServer("ConfigServer");
-std::string generateDistLockProcessId(OperationContext* txn) {
+std::string generateDistLockProcessId(OperationContext* opCtx) {
std::unique_ptr<SecureRandom> rng(SecureRandom::create());
return str::stream()
<< HostAndPort(getHostName(), serverGlobalParams.port).toString() << ':'
<< durationCount<Seconds>(
- txn->getServiceContext()->getPreciseClockSource()->now().toDurationSinceEpoch())
+ opCtx->getServiceContext()->getPreciseClockSource()->now().toDurationSinceEpoch())
<< ':' << rng->nextInt64();
}
-Status initializeGlobalShardingState(OperationContext* txn,
+Status initializeGlobalShardingState(OperationContext* opCtx,
const ConnectionString& configCS,
StringData distLockProcessId,
std::unique_ptr<ShardFactory> shardFactory,
@@ -189,7 +189,7 @@ Status initializeGlobalShardingState(OperationContext* txn,
auto shardRegistry(stdx::make_unique<ShardRegistry>(std::move(shardFactory), configCS));
auto catalogClient =
- makeCatalogClient(txn->getServiceContext(), shardRegistry.get(), distLockProcessId);
+ makeCatalogClient(opCtx->getServiceContext(), shardRegistry.get(), distLockProcessId);
auto rawCatalogClient = catalogClient.get();
@@ -227,20 +227,20 @@ Status initializeGlobalShardingState(OperationContext* txn,
return Status::OK();
}
-Status reloadShardRegistryUntilSuccess(OperationContext* txn) {
+Status reloadShardRegistryUntilSuccess(OperationContext* opCtx) {
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
return Status::OK();
}
while (!globalInShutdownDeprecated()) {
- auto stopStatus = txn->checkForInterruptNoAssert();
+ auto stopStatus = opCtx->checkForInterruptNoAssert();
if (!stopStatus.isOK()) {
return stopStatus;
}
try {
- uassertStatusOK(ClusterIdentityLoader::get(txn)->loadClusterId(
- txn, repl::ReadConcernLevel::kMajorityReadConcern));
+ uassertStatusOK(ClusterIdentityLoader::get(opCtx)->loadClusterId(
+ opCtx, repl::ReadConcernLevel::kMajorityReadConcern));
if (grid.shardRegistry()->isUp()) {
return Status::OK();
}
diff --git a/src/mongo/s/sharding_initialization.h b/src/mongo/s/sharding_initialization.h
index 33e3046074c..f2e3d12db8b 100644
--- a/src/mongo/s/sharding_initialization.h
+++ b/src/mongo/s/sharding_initialization.h
@@ -63,13 +63,13 @@ extern const StringData kDistLockProcessIdForConfigServer;
/**
* Generates a uniform string to be used as a process id for the distributed lock manager.
*/
-std::string generateDistLockProcessId(OperationContext* txn);
+std::string generateDistLockProcessId(OperationContext* opCtx);
/**
* Takes in the connection string for reaching the config servers and initializes the global
* ShardingCatalogClient, ShardingCatalogManager, ShardRegistry, and Grid objects.
*/
-Status initializeGlobalShardingState(OperationContext* txn,
+Status initializeGlobalShardingState(OperationContext* opCtx,
const ConnectionString& configCS,
StringData distLockProcessId,
std::unique_ptr<ShardFactory> shardFactory,
@@ -80,6 +80,6 @@ Status initializeGlobalShardingState(OperationContext* txn,
* Tries to contact the config server and reload the shard registry and the cluster ID until it
* succeeds or is interrupted.
*/
-Status reloadShardRegistryUntilSuccess(OperationContext* txn);
+Status reloadShardRegistryUntilSuccess(OperationContext* opCtx);
} // namespace mongo
diff --git a/src/mongo/s/sharding_raii.cpp b/src/mongo/s/sharding_raii.cpp
index ea50d5ce128..b90f975ed35 100644
--- a/src/mongo/s/sharding_raii.cpp
+++ b/src/mongo/s/sharding_raii.cpp
@@ -47,9 +47,9 @@ ScopedShardDatabase::ScopedShardDatabase(std::shared_ptr<DBConfig> db) : _db(db)
ScopedShardDatabase::~ScopedShardDatabase() = default;
-StatusWith<ScopedShardDatabase> ScopedShardDatabase::getExisting(OperationContext* txn,
+StatusWith<ScopedShardDatabase> ScopedShardDatabase::getExisting(OperationContext* opCtx,
StringData dbName) {
- auto dbStatus = Grid::get(txn)->catalogCache()->getDatabase(txn, dbName.toString());
+ auto dbStatus = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName.toString());
if (!dbStatus.isOK()) {
return {dbStatus.getStatus().code(),
str::stream() << "Database " << dbName << " was not found due to "
@@ -59,18 +59,18 @@ StatusWith<ScopedShardDatabase> ScopedShardDatabase::getExisting(OperationContex
return {ScopedShardDatabase(std::move(dbStatus.getValue()))};
}
-StatusWith<ScopedShardDatabase> ScopedShardDatabase::getOrCreate(OperationContext* txn,
+StatusWith<ScopedShardDatabase> ScopedShardDatabase::getOrCreate(OperationContext* opCtx,
StringData dbName) {
- auto dbStatus = getExisting(txn, dbName);
+ auto dbStatus = getExisting(opCtx, dbName);
if (dbStatus.isOK()) {
return dbStatus;
}
if (dbStatus == ErrorCodes::NamespaceNotFound) {
auto statusCreateDb =
- Grid::get(txn)->catalogClient(txn)->createDatabase(txn, dbName.toString());
+ Grid::get(opCtx)->catalogClient(opCtx)->createDatabase(opCtx, dbName.toString());
if (statusCreateDb.isOK() || statusCreateDb == ErrorCodes::NamespaceExists) {
- return getExisting(txn, dbName);
+ return getExisting(opCtx, dbName);
}
return statusCreateDb;
@@ -87,22 +87,22 @@ ScopedChunkManager::ScopedChunkManager(ScopedShardDatabase db, std::shared_ptr<S
ScopedChunkManager::~ScopedChunkManager() = default;
-StatusWith<ScopedChunkManager> ScopedChunkManager::get(OperationContext* txn,
+StatusWith<ScopedChunkManager> ScopedChunkManager::get(OperationContext* opCtx,
const NamespaceString& nss) {
- auto scopedDbStatus = ScopedShardDatabase::getExisting(txn, nss.db());
+ auto scopedDbStatus = ScopedShardDatabase::getExisting(opCtx, nss.db());
if (!scopedDbStatus.isOK()) {
return scopedDbStatus.getStatus();
}
auto scopedDb = std::move(scopedDbStatus.getValue());
- auto cm = scopedDb.db()->getChunkManagerIfExists(txn, nss.ns());
+ auto cm = scopedDb.db()->getChunkManagerIfExists(opCtx, nss.ns());
if (cm) {
return {ScopedChunkManager(std::move(scopedDb), std::move(cm))};
}
auto shardStatus =
- Grid::get(txn)->shardRegistry()->getShard(txn, scopedDb.db()->getPrimaryId());
+ Grid::get(opCtx)->shardRegistry()->getShard(opCtx, scopedDb.db()->getPrimaryId());
if (!shardStatus.isOK()) {
return {ErrorCodes::fromInt(40371),
str::stream() << "The primary shard for collection " << nss.ns()
@@ -113,19 +113,19 @@ StatusWith<ScopedChunkManager> ScopedChunkManager::get(OperationContext* txn,
return {ScopedChunkManager(std::move(scopedDb), std::move(shardStatus.getValue()))};
}
-StatusWith<ScopedChunkManager> ScopedChunkManager::getOrCreate(OperationContext* txn,
+StatusWith<ScopedChunkManager> ScopedChunkManager::getOrCreate(OperationContext* opCtx,
const NamespaceString& nss) {
- auto scopedDbStatus = ScopedShardDatabase::getOrCreate(txn, nss.db());
+ auto scopedDbStatus = ScopedShardDatabase::getOrCreate(opCtx, nss.db());
if (!scopedDbStatus.isOK()) {
return scopedDbStatus.getStatus();
}
- return ScopedChunkManager::get(txn, nss);
+ return ScopedChunkManager::get(opCtx, nss);
}
-StatusWith<ScopedChunkManager> ScopedChunkManager::refreshAndGet(OperationContext* txn,
+StatusWith<ScopedChunkManager> ScopedChunkManager::refreshAndGet(OperationContext* opCtx,
const NamespaceString& nss) {
- auto scopedDbStatus = ScopedShardDatabase::getExisting(txn, nss.db());
+ auto scopedDbStatus = ScopedShardDatabase::getExisting(opCtx, nss.db());
if (!scopedDbStatus.isOK()) {
return scopedDbStatus.getStatus();
}
@@ -134,7 +134,7 @@ StatusWith<ScopedChunkManager> ScopedChunkManager::refreshAndGet(OperationContex
try {
std::shared_ptr<ChunkManager> cm =
- scopedDb.db()->getChunkManager(txn, nss.ns(), true, false);
+ scopedDb.db()->getChunkManager(opCtx, nss.ns(), true, false);
if (!cm) {
return {ErrorCodes::NamespaceNotSharded,
diff --git a/src/mongo/s/sharding_raii.h b/src/mongo/s/sharding_raii.h
index 92d5858f36b..0c54f281985 100644
--- a/src/mongo/s/sharding_raii.h
+++ b/src/mongo/s/sharding_raii.h
@@ -48,14 +48,14 @@ public:
* Otherwise, either returns NamespaceNotFound if the database does not exist, or any other
* error code indicating why the database could not be loaded.
*/
- static StatusWith<ScopedShardDatabase> getExisting(OperationContext* txn, StringData dbName);
+ static StatusWith<ScopedShardDatabase> getExisting(OperationContext* opCtx, StringData dbName);
/**
* If the specified database exists already, loads it in the cache (if not already there) and
* returns it. Otherwise, if it does not exist, this call will implicitly create it as
* non-sharded.
*/
- static StatusWith<ScopedShardDatabase> getOrCreate(OperationContext* txn, StringData dbName);
+ static StatusWith<ScopedShardDatabase> getOrCreate(OperationContext* opCtx, StringData dbName);
/**
* Returns the underlying database cache entry.
@@ -94,13 +94,13 @@ public:
* Returns NamespaceNotFound if the database does not exist, or any other error indicating
* problem communicating with the config server.
*/
- static StatusWith<ScopedChunkManager> get(OperationContext* txn, const NamespaceString& nss);
+ static StatusWith<ScopedChunkManager> get(OperationContext* opCtx, const NamespaceString& nss);
/**
* If the database holding the specified namespace does not exist, creates it and then behaves
* like the 'get' method above.
*/
- static StatusWith<ScopedChunkManager> getOrCreate(OperationContext* txn,
+ static StatusWith<ScopedChunkManager> getOrCreate(OperationContext* opCtx,
const NamespaceString& nss);
/**
@@ -110,7 +110,7 @@ public:
* metadata and if so incorporates those. Otherwise, if it does not exist or any other error
* occurs, passes that error back.
*/
- static StatusWith<ScopedChunkManager> refreshAndGet(OperationContext* txn,
+ static StatusWith<ScopedChunkManager> refreshAndGet(OperationContext* opCtx,
const NamespaceString& nss);
/**
diff --git a/src/mongo/s/sharding_uptime_reporter.cpp b/src/mongo/s/sharding_uptime_reporter.cpp
index d2d170df176..8d507dbfce6 100644
--- a/src/mongo/s/sharding_uptime_reporter.cpp
+++ b/src/mongo/s/sharding_uptime_reporter.cpp
@@ -57,7 +57,9 @@ std::string constructInstanceIdString() {
* Reports the uptime status of the current instance to the config.pings collection. This method
* is best-effort and never throws.
*/
-void reportStatus(OperationContext* txn, const std::string& instanceId, const Timer& upTimeTimer) {
+void reportStatus(OperationContext* opCtx,
+ const std::string& instanceId,
+ const Timer& upTimeTimer) {
MongosType mType;
mType.setName(instanceId);
mType.setPing(jsTime());
@@ -67,8 +69,8 @@ void reportStatus(OperationContext* txn, const std::string& instanceId, const Ti
mType.setMongoVersion(VersionInfoInterface::instance().version().toString());
try {
- Grid::get(txn)->catalogClient(txn)->updateConfigDocument(
- txn,
+ Grid::get(opCtx)->catalogClient(opCtx)->updateConfigDocument(
+ opCtx,
MongosType::ConfigNS,
BSON(MongosType::name(instanceId)),
BSON("$set" << mType.toBSON()),
@@ -99,11 +101,12 @@ void ShardingUptimeReporter::startPeriodicThread() {
while (!globalInShutdownDeprecated()) {
{
- auto txn = cc().makeOperationContext();
- reportStatus(txn.get(), instanceId, upTimeTimer);
+ auto opCtx = cc().makeOperationContext();
+ reportStatus(opCtx.get(), instanceId, upTimeTimer);
- auto status =
- Grid::get(txn.get())->getBalancerConfiguration()->refreshAndCheck(txn.get());
+ auto status = Grid::get(opCtx.get())
+ ->getBalancerConfiguration()
+ ->refreshAndCheck(opCtx.get());
if (!status.isOK()) {
warning() << "failed to refresh mongos settings" << causedBy(status);
}
diff --git a/src/mongo/s/write_ops/batch_write_exec.cpp b/src/mongo/s/write_ops/batch_write_exec.cpp
index 6a6395eea13..57f84edf800 100644
--- a/src/mongo/s/write_ops/batch_write_exec.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec.cpp
@@ -84,7 +84,7 @@ static void noteStaleResponses(const vector<ShardError*>& staleErrors, NSTargete
// This only applies when no writes are occurring and metadata is not changing on reload
static const int kMaxRoundsWithoutProgress(5);
-void BatchWriteExec::executeBatch(OperationContext* txn,
+void BatchWriteExec::executeBatch(OperationContext* opCtx,
const BatchedCommandRequest& clientRequest,
BatchedCommandResponse* clientResponse,
BatchWriteExecStats* stats) {
@@ -132,7 +132,7 @@ void BatchWriteExec::executeBatch(OperationContext* txn,
// record target errors definitively.
bool recordTargetErrors = refreshedTargeter;
Status targetStatus =
- batchOp.targetBatch(txn, *_targeter, recordTargetErrors, &childBatches);
+ batchOp.targetBatch(opCtx, *_targeter, recordTargetErrors, &childBatches);
if (!targetStatus.isOK()) {
// Don't do anything until a targeter refresh
_targeter->noteCouldNotTarget();
@@ -171,8 +171,8 @@ void BatchWriteExec::executeBatch(OperationContext* txn,
// Figure out what host we need to dispatch our targeted batch
const ReadPreferenceSetting readPref(ReadPreference::PrimaryOnly, TagSet());
- auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(
- txn, nextBatch->getEndpoint().shardName);
+ auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(
+ opCtx, nextBatch->getEndpoint().shardName);
bool resolvedHost = false;
ConnectionString shardHost;
@@ -327,7 +327,7 @@ void BatchWriteExec::executeBatch(OperationContext* txn,
//
bool targeterChanged = false;
- Status refreshStatus = _targeter->refreshIfNeeded(txn, &targeterChanged);
+ Status refreshStatus = _targeter->refreshIfNeeded(opCtx, &targeterChanged);
if (!refreshStatus.isOK()) {
// It's okay if we can't refresh, we'll just record errors for the ops if
diff --git a/src/mongo/s/write_ops/batch_write_exec.h b/src/mongo/s/write_ops/batch_write_exec.h
index 739e16a046d..b430e3c5baf 100644
--- a/src/mongo/s/write_ops/batch_write_exec.h
+++ b/src/mongo/s/write_ops/batch_write_exec.h
@@ -72,7 +72,7 @@ public:
*
* This function does not throw, any errors are reported via the clientResponse.
*/
- void executeBatch(OperationContext* txn,
+ void executeBatch(OperationContext* opCtx,
const BatchedCommandRequest& clientRequest,
BatchedCommandResponse* clientResponse,
BatchWriteExecStats* stats);
diff --git a/src/mongo/s/write_ops/batch_write_op.cpp b/src/mongo/s/write_ops/batch_write_op.cpp
index 4f7a4efe52c..6f0aad917b4 100644
--- a/src/mongo/s/write_ops/batch_write_op.cpp
+++ b/src/mongo/s/write_ops/batch_write_op.cpp
@@ -233,7 +233,7 @@ static void cancelBatches(const WriteErrorDetail& why,
batchMap->clear();
}
-Status BatchWriteOp::targetBatch(OperationContext* txn,
+Status BatchWriteOp::targetBatch(OperationContext* opCtx,
const NSTargeter& targeter,
bool recordTargetErrors,
vector<TargetedWriteBatch*>* targetedBatches) {
@@ -293,7 +293,7 @@ Status BatchWriteOp::targetBatch(OperationContext* txn,
OwnedPointerVector<TargetedWrite> writesOwned;
vector<TargetedWrite*>& writes = writesOwned.mutableVector();
- Status targetStatus = writeOp.targetWrites(txn, targeter, &writes);
+ Status targetStatus = writeOp.targetWrites(opCtx, targeter, &writes);
if (!targetStatus.isOK()) {
WriteErrorDetail targetError;
diff --git a/src/mongo/s/write_ops/batch_write_op.h b/src/mongo/s/write_ops/batch_write_op.h
index 030767851c3..455b84e1cd3 100644
--- a/src/mongo/s/write_ops/batch_write_op.h
+++ b/src/mongo/s/write_ops/batch_write_op.h
@@ -106,7 +106,7 @@ public:
*
* Returned TargetedWriteBatches are owned by the caller.
*/
- Status targetBatch(OperationContext* txn,
+ Status targetBatch(OperationContext* opCtx,
const NSTargeter& targeter,
bool recordTargetErrors,
std::vector<TargetedWriteBatch*>* targetedBatches);
diff --git a/src/mongo/s/write_ops/batch_write_op_test.cpp b/src/mongo/s/write_ops/batch_write_op_test.cpp
index ea50d84a996..e5bc9b5a5cd 100644
--- a/src/mongo/s/write_ops/batch_write_op_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_op_test.cpp
@@ -135,7 +135,7 @@ TEST(WriteOpTests, SingleOp) {
// Single-op targeting test
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -152,7 +152,7 @@ TEST(WriteOpTests, SingleOp) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -175,7 +175,7 @@ TEST(WriteOpTests, SingleError) {
// Single-op error test
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -192,7 +192,7 @@ TEST(WriteOpTests, SingleError) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -221,7 +221,7 @@ TEST(WriteOpTests, SingleTargetError) {
// Single-op targeting error test
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -238,14 +238,14 @@ TEST(WriteOpTests, SingleTargetError) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(!status.isOK());
ASSERT(!batchOp.isFinished());
ASSERT_EQUALS(targeted.size(), 0u);
// Record targeting failures
- status = batchOp.targetBatch(&txn, targeter, true, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, true, &targeted);
ASSERT(status.isOK());
ASSERT(batchOp.isFinished());
@@ -264,7 +264,7 @@ TEST(WriteOpTests, SingleWriteConcernErrorOrdered) {
// write concern error if one occurs
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -281,7 +281,7 @@ TEST(WriteOpTests, SingleWriteConcernErrorOrdered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -314,7 +314,7 @@ TEST(WriteOpTests, SingleStaleError) {
// We should retry the same batch until we're not stale
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -329,7 +329,7 @@ TEST(WriteOpTests, SingleStaleError) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
BatchedCommandResponse response;
buildResponse(0, &response);
@@ -340,14 +340,14 @@ TEST(WriteOpTests, SingleStaleError) {
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
// Respond again with a stale response
batchOp.noteBatchResponse(*targeted.front(), response, NULL);
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
buildResponse(1, &response);
@@ -381,7 +381,7 @@ TEST(WriteOpTests, MultiOpSameShardOrdered) {
// Multi-op targeting test (ordered)
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -400,7 +400,7 @@ TEST(WriteOpTests, MultiOpSameShardOrdered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -425,7 +425,7 @@ TEST(WriteOpTests, MultiOpSameShardUnordered) {
// Multi-op targeting test (unordered)
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -444,7 +444,7 @@ TEST(WriteOpTests, MultiOpSameShardUnordered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -470,7 +470,7 @@ TEST(WriteOpTests, MultiOpTwoShardsOrdered) {
// There should be two sets of single batches (one to each shard, one-by-one)
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -490,7 +490,7 @@ TEST(WriteOpTests, MultiOpTwoShardsOrdered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -506,7 +506,7 @@ TEST(WriteOpTests, MultiOpTwoShardsOrdered) {
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
ASSERT_EQUALS(targeted.size(), 1u);
@@ -529,7 +529,7 @@ TEST(WriteOpTests, MultiOpTwoShardsUnordered) {
// There should be one set of two batches (one to each shard)
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -549,7 +549,7 @@ TEST(WriteOpTests, MultiOpTwoShardsUnordered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -581,7 +581,7 @@ TEST(WriteOpTests, MultiOpTwoShardsEachOrdered) {
// There should be two sets of two batches to each shard (two for each delete op)
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -603,7 +603,7 @@ TEST(WriteOpTests, MultiOpTwoShardsEachOrdered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -624,7 +624,7 @@ TEST(WriteOpTests, MultiOpTwoShardsEachOrdered) {
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
ASSERT_EQUALS(targeted.size(), 2u);
@@ -652,7 +652,7 @@ TEST(WriteOpTests, MultiOpTwoShardsEachUnordered) {
// There should be one set of two batches to each shard (containing writes for both ops)
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -674,7 +674,7 @@ TEST(WriteOpTests, MultiOpTwoShardsEachUnordered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -708,7 +708,7 @@ TEST(WriteOpTests, MultiOpOneOrTwoShardsOrdered) {
// last ops should be batched together
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -736,7 +736,7 @@ TEST(WriteOpTests, MultiOpOneOrTwoShardsOrdered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -753,7 +753,7 @@ TEST(WriteOpTests, MultiOpOneOrTwoShardsOrdered) {
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -774,7 +774,7 @@ TEST(WriteOpTests, MultiOpOneOrTwoShardsOrdered) {
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -792,7 +792,7 @@ TEST(WriteOpTests, MultiOpOneOrTwoShardsOrdered) {
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -820,7 +820,7 @@ TEST(WriteOpTests, MultiOpOneOrTwoShardsUnordered) {
// Should batch all the ops together into two batches of four ops for each shard
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -848,7 +848,7 @@ TEST(WriteOpTests, MultiOpOneOrTwoShardsUnordered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -882,7 +882,7 @@ TEST(WriteOpTests, MultiOpSingleShardErrorUnordered) {
// There should be one set of two batches to each shard and an error reported
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -901,7 +901,7 @@ TEST(WriteOpTests, MultiOpSingleShardErrorUnordered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -946,7 +946,7 @@ TEST(WriteOpTests, MultiOpTwoShardErrorsUnordered) {
// There should be one set of two batches to each shard and two errors reported
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -965,7 +965,7 @@ TEST(WriteOpTests, MultiOpTwoShardErrorsUnordered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -1013,7 +1013,7 @@ TEST(WriteOpTests, MultiOpPartialSingleShardErrorUnordered) {
// There should be one set of two batches to each shard and an error reported
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -1034,7 +1034,7 @@ TEST(WriteOpTests, MultiOpPartialSingleShardErrorUnordered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -1080,7 +1080,7 @@ TEST(WriteOpTests, MultiOpPartialSingleShardErrorOrdered) {
// op should not get run
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -1101,7 +1101,7 @@ TEST(WriteOpTests, MultiOpPartialSingleShardErrorOrdered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -1151,7 +1151,7 @@ TEST(WriteOpTests, MultiOpErrorAndWriteConcernErrorUnordered) {
// Don't suppress the error if ordered : false
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -1169,7 +1169,7 @@ TEST(WriteOpTests, MultiOpErrorAndWriteConcernErrorUnordered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
BatchedCommandResponse response;
buildResponse(1, &response);
@@ -1195,7 +1195,7 @@ TEST(WriteOpTests, SingleOpErrorAndWriteConcernErrorOrdered) {
// Suppress the write concern error if ordered and we also have an error
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -1214,7 +1214,7 @@ TEST(WriteOpTests, SingleOpErrorAndWriteConcernErrorOrdered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
BatchedCommandResponse response;
buildResponse(1, &response);
@@ -1246,7 +1246,7 @@ TEST(WriteOpTests, MultiOpFailedTargetOrdered) {
// Targeting failure on second op in batch op (ordered)
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -1265,14 +1265,14 @@ TEST(WriteOpTests, MultiOpFailedTargetOrdered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
// First targeting round fails since we may be stale
ASSERT(!status.isOK());
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, true, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, true, &targeted);
// Second targeting round is ok, but should stop at first write
ASSERT(status.isOK());
@@ -1288,7 +1288,7 @@ TEST(WriteOpTests, MultiOpFailedTargetOrdered) {
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, true, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, true, &targeted);
// Second targeting round results in an error which finishes the batch
ASSERT(status.isOK());
@@ -1309,7 +1309,7 @@ TEST(WriteOpTests, MultiOpFailedTargetUnordered) {
// Targeting failure on second op in batch op (unordered)
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -1329,14 +1329,14 @@ TEST(WriteOpTests, MultiOpFailedTargetUnordered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
// First targeting round fails since we may be stale
ASSERT(!status.isOK());
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, true, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, true, &targeted);
// Second targeting round is ok, and should record an error
ASSERT(status.isOK());
@@ -1366,7 +1366,7 @@ TEST(WriteOpTests, MultiOpFailedBatchOrdered) {
// Expect this gets translated down into write errors for first affected write
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -1384,7 +1384,7 @@ TEST(WriteOpTests, MultiOpFailedBatchOrdered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
BatchedCommandResponse response;
buildResponse(1, &response);
@@ -1394,7 +1394,7 @@ TEST(WriteOpTests, MultiOpFailedBatchOrdered) {
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, true, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, true, &targeted);
buildErrResponse(ErrorCodes::UnknownError, "mock error", &response);
@@ -1419,7 +1419,7 @@ TEST(WriteOpTests, MultiOpFailedBatchUnordered) {
// Expect this gets translated down into write errors for all affected writes
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -1438,7 +1438,7 @@ TEST(WriteOpTests, MultiOpFailedBatchUnordered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
BatchedCommandResponse response;
buildResponse(1, &response);
@@ -1472,7 +1472,7 @@ TEST(WriteOpTests, MultiOpAbortOrdered) {
// Expect this gets translated down into write error for first affected write
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -1490,7 +1490,7 @@ TEST(WriteOpTests, MultiOpAbortOrdered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
BatchedCommandResponse response;
buildResponse(1, &response);
@@ -1522,7 +1522,7 @@ TEST(WriteOpTests, MultiOpAbortUnordered) {
// Expect this gets translated down into write errors for all affected writes
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -1563,7 +1563,7 @@ TEST(WriteOpTests, MultiOpTwoWCErrors) {
// error
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -1581,7 +1581,7 @@ TEST(WriteOpTests, MultiOpTwoWCErrors) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
BatchedCommandResponse response;
buildResponse(1, &response);
@@ -1592,7 +1592,7 @@ TEST(WriteOpTests, MultiOpTwoWCErrors) {
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, true, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, true, &targeted);
// Second shard write write concern fails.
batchOp.noteBatchResponse(*targeted.front(), response, NULL);
@@ -1615,7 +1615,7 @@ TEST(WriteOpLimitTests, OneBigDoc) {
// Big single operation test - should go through
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -1634,7 +1634,7 @@ TEST(WriteOpLimitTests, OneBigDoc) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(targeted.size(), 1u);
@@ -1650,7 +1650,7 @@ TEST(WriteOpLimitTests, OneBigOneSmall) {
// Big doc with smaller additional doc - should go through as two batches
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -1671,7 +1671,7 @@ TEST(WriteOpLimitTests, OneBigOneSmall) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(targeted.size(), 1u);
ASSERT_EQUALS(targeted.front()->getWrites().size(), 1u);
@@ -1683,7 +1683,7 @@ TEST(WriteOpLimitTests, OneBigOneSmall) {
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(targeted.size(), 1u);
ASSERT_EQUALS(targeted.front()->getWrites().size(), 1u);
@@ -1697,7 +1697,7 @@ TEST(WriteOpLimitTests, TooManyOps) {
// Batch of 1002 documents
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -1716,7 +1716,7 @@ TEST(WriteOpLimitTests, TooManyOps) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(targeted.size(), 1u);
ASSERT_EQUALS(targeted.front()->getWrites().size(), 1000u);
@@ -1728,7 +1728,7 @@ TEST(WriteOpLimitTests, TooManyOps) {
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(targeted.size(), 1u);
ASSERT_EQUALS(targeted.front()->getWrites().size(), 2u);
@@ -1743,7 +1743,7 @@ TEST(WriteOpLimitTests, UpdateOverheadIncluded) {
// calculation
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -1780,7 +1780,7 @@ TEST(WriteOpLimitTests, UpdateOverheadIncluded) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(targeted.size(), 1u);
ASSERT_LESS_THAN(targeted.front()->getWrites().size(), 1000u);
@@ -1796,7 +1796,7 @@ TEST(WriteOpLimitTests, UpdateOverheadIncluded) {
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(targeted.size(), 1u);
ASSERT_LESS_THAN(targeted.front()->getWrites().size(), 1000u);
diff --git a/src/mongo/s/write_ops/mock_ns_targeter.h b/src/mongo/s/write_ops/mock_ns_targeter.h
index 44bc5be18c2..a430e3caa7d 100644
--- a/src/mongo/s/write_ops/mock_ns_targeter.h
+++ b/src/mongo/s/write_ops/mock_ns_targeter.h
@@ -85,7 +85,9 @@ public:
/**
* Returns a ShardEndpoint for the doc from the mock ranges
*/
- Status targetInsert(OperationContext* txn, const BSONObj& doc, ShardEndpoint** endpoint) const {
+ Status targetInsert(OperationContext* opCtx,
+ const BSONObj& doc,
+ ShardEndpoint** endpoint) const {
std::vector<ShardEndpoint*> endpoints;
Status status = targetQuery(doc, &endpoints);
if (!status.isOK())
@@ -99,7 +101,7 @@ public:
* Returns the first ShardEndpoint for the query from the mock ranges. Only can handle
* queries of the form { field : { $gte : <value>, $lt : <value> } }.
*/
- Status targetUpdate(OperationContext* txn,
+ Status targetUpdate(OperationContext* opCtx,
const BatchedUpdateDocument& updateDoc,
std::vector<ShardEndpoint*>* endpoints) const {
return targetQuery(updateDoc.getQuery(), endpoints);
@@ -109,7 +111,7 @@ public:
* Returns the first ShardEndpoint for the query from the mock ranges. Only can handle
* queries of the form { field : { $gte : <value>, $lt : <value> } }.
*/
- Status targetDelete(OperationContext* txn,
+ Status targetDelete(OperationContext* opCtx,
const BatchedDeleteDocument& deleteDoc,
std::vector<ShardEndpoint*>* endpoints) const {
return targetQuery(deleteDoc.getQuery(), endpoints);
@@ -140,7 +142,7 @@ public:
// No-op
}
- Status refreshIfNeeded(OperationContext* txn, bool* wasChanged) {
+ Status refreshIfNeeded(OperationContext* opCtx, bool* wasChanged) {
// No-op
if (wasChanged)
*wasChanged = false;
diff --git a/src/mongo/s/write_ops/write_op.cpp b/src/mongo/s/write_ops/write_op.cpp
index 25f9f13b3aa..6f58a58a124 100644
--- a/src/mongo/s/write_ops/write_op.cpp
+++ b/src/mongo/s/write_ops/write_op.cpp
@@ -65,7 +65,7 @@ const WriteErrorDetail& WriteOp::getOpError() const {
return *_error;
}
-Status WriteOp::targetWrites(OperationContext* txn,
+Status WriteOp::targetWrites(OperationContext* opCtx,
const NSTargeter& targeter,
std::vector<TargetedWrite*>* targetedWrites) {
bool isUpdate = _itemRef.getOpType() == BatchedCommandRequest::BatchType_Update;
@@ -77,16 +77,16 @@ Status WriteOp::targetWrites(OperationContext* txn,
vector<ShardEndpoint*>& endpoints = endpointsOwned.mutableVector();
if (isUpdate) {
- targetStatus = targeter.targetUpdate(txn, *_itemRef.getUpdate(), &endpoints);
+ targetStatus = targeter.targetUpdate(opCtx, *_itemRef.getUpdate(), &endpoints);
} else if (isDelete) {
- targetStatus = targeter.targetDelete(txn, *_itemRef.getDelete(), &endpoints);
+ targetStatus = targeter.targetDelete(opCtx, *_itemRef.getDelete(), &endpoints);
} else {
dassert(_itemRef.getOpType() == BatchedCommandRequest::BatchType_Insert);
ShardEndpoint* endpoint = NULL;
// TODO: Remove the index targeting stuff once there is a command for it
if (!isIndexInsert) {
- targetStatus = targeter.targetInsert(txn, _itemRef.getDocument(), &endpoint);
+ targetStatus = targeter.targetInsert(opCtx, _itemRef.getDocument(), &endpoint);
} else {
// TODO: Retry index writes with stale version?
targetStatus = targeter.targetCollection(&endpoints);
diff --git a/src/mongo/s/write_ops/write_op.h b/src/mongo/s/write_ops/write_op.h
index d56be517fe7..1be62c36d0e 100644
--- a/src/mongo/s/write_ops/write_op.h
+++ b/src/mongo/s/write_ops/write_op.h
@@ -122,7 +122,7 @@ public:
* Returns !OK if the targeting process itself fails
* (no TargetedWrites will be added, state unchanged)
*/
- Status targetWrites(OperationContext* txn,
+ Status targetWrites(OperationContext* opCtx,
const NSTargeter& targeter,
std::vector<TargetedWrite*>* targetedWrites);
diff --git a/src/mongo/s/write_ops/write_op_test.cpp b/src/mongo/s/write_ops/write_op_test.cpp
index c54d4f7070a..b780a79a344 100644
--- a/src/mongo/s/write_ops/write_op_test.cpp
+++ b/src/mongo/s/write_ops/write_op_test.cpp
@@ -83,7 +83,7 @@ TEST(WriteOpTests, TargetSingle) {
// Basic targeting test
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
@@ -105,7 +105,7 @@ TEST(WriteOpTests, TargetSingle) {
OwnedPointerVector<TargetedWrite> targetedOwned;
vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
- Status status = writeOp.targetWrites(&txn, targeter, &targeted);
+ Status status = writeOp.targetWrites(&opCtx, targeter, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Pending);
@@ -141,7 +141,7 @@ TEST(WriteOpTests, TargetMultiOneShard) {
// Multi-write targeting test where our query goes to one shard
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion(10, 0, OID()));
@@ -167,7 +167,7 @@ TEST(WriteOpTests, TargetMultiOneShard) {
OwnedPointerVector<TargetedWrite> targetedOwned;
vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
- Status status = writeOp.targetWrites(&txn, targeter, &targeted);
+ Status status = writeOp.targetWrites(&opCtx, targeter, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Pending);
@@ -184,7 +184,7 @@ TEST(WriteOpTests, TargetMultiAllShards) {
// Multi-write targeting test where our write goes to more than one shard
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion(10, 0, OID()));
@@ -211,7 +211,7 @@ TEST(WriteOpTests, TargetMultiAllShards) {
OwnedPointerVector<TargetedWrite> targetedOwned;
vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
- Status status = writeOp.targetWrites(&txn, targeter, &targeted);
+ Status status = writeOp.targetWrites(&opCtx, targeter, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Pending);
@@ -236,7 +236,7 @@ TEST(WriteOpTests, ErrorSingle) {
// Single error after targeting test
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
@@ -258,7 +258,7 @@ TEST(WriteOpTests, ErrorSingle) {
OwnedPointerVector<TargetedWrite> targetedOwned;
vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
- Status status = writeOp.targetWrites(&txn, targeter, &targeted);
+ Status status = writeOp.targetWrites(&opCtx, targeter, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Pending);
@@ -282,7 +282,7 @@ TEST(WriteOpTests, CancelSingle) {
// Cancel single targeting test
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
@@ -304,7 +304,7 @@ TEST(WriteOpTests, CancelSingle) {
OwnedPointerVector<TargetedWrite> targetedOwned;
vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
- Status status = writeOp.targetWrites(&txn, targeter, &targeted);
+ Status status = writeOp.targetWrites(&opCtx, targeter, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Pending);
@@ -325,7 +325,7 @@ TEST(WriteOpTests, RetrySingleOp) {
// Retry single targeting test
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
@@ -347,7 +347,7 @@ TEST(WriteOpTests, RetrySingleOp) {
OwnedPointerVector<TargetedWrite> targetedOwned;
vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
- Status status = writeOp.targetWrites(&txn, targeter, &targeted);
+ Status status = writeOp.targetWrites(&opCtx, targeter, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Pending);
diff --git a/src/mongo/scripting/dbdirectclient_factory.cpp b/src/mongo/scripting/dbdirectclient_factory.cpp
index e7e0ea9e478..06c95e3e949 100644
--- a/src/mongo/scripting/dbdirectclient_factory.cpp
+++ b/src/mongo/scripting/dbdirectclient_factory.cpp
@@ -48,18 +48,18 @@ DBDirectClientFactory& DBDirectClientFactory::get(ServiceContext* context) {
return forService(context);
}
-DBDirectClientFactory& DBDirectClientFactory::get(OperationContext* txn) {
- fassert(40152, txn);
- return get(txn->getServiceContext());
+DBDirectClientFactory& DBDirectClientFactory::get(OperationContext* opCtx) {
+ fassert(40152, opCtx);
+ return get(opCtx->getServiceContext());
}
void DBDirectClientFactory::registerImplementation(Impl implementation) {
_implementation = std::move(implementation);
}
-auto DBDirectClientFactory::create(OperationContext* txn) -> Result {
+auto DBDirectClientFactory::create(OperationContext* opCtx) -> Result {
uassert(40153, "Cannot create a direct client in this context", _implementation);
- return _implementation(txn);
+ return _implementation(opCtx);
}
} // namespace mongo
diff --git a/src/mongo/scripting/dbdirectclient_factory.h b/src/mongo/scripting/dbdirectclient_factory.h
index cf524e40849..2eb361bc7fe 100644
--- a/src/mongo/scripting/dbdirectclient_factory.h
+++ b/src/mongo/scripting/dbdirectclient_factory.h
@@ -44,10 +44,10 @@ public:
using Impl = stdx::function<Result(OperationContext*)>;
static DBDirectClientFactory& get(ServiceContext* service);
- static DBDirectClientFactory& get(OperationContext* txn);
+ static DBDirectClientFactory& get(OperationContext* opCtx);
void registerImplementation(Impl implementation);
- Result create(OperationContext* txn);
+ Result create(OperationContext* opCtx);
private:
Impl _implementation;
diff --git a/src/mongo/scripting/engine.cpp b/src/mongo/scripting/engine.cpp
index 514568589bb..76b56a60ba9 100644
--- a/src/mongo/scripting/engine.cpp
+++ b/src/mongo/scripting/engine.cpp
@@ -192,8 +192,8 @@ public:
void rollback() {}
};
-void Scope::storedFuncMod(OperationContext* txn) {
- txn->recoveryUnit()->registerChange(new StoredFuncModLogOpHandler());
+void Scope::storedFuncMod(OperationContext* opCtx) {
+ opCtx->recoveryUnit()->registerChange(new StoredFuncModLogOpHandler());
}
void Scope::validateObjectIdString(const string& str) {
@@ -202,7 +202,7 @@ void Scope::validateObjectIdString(const string& str) {
uassert(10430, "invalid object id: not hex", std::isxdigit(str.at(i)));
}
-void Scope::loadStored(OperationContext* txn, bool ignoreNotConnected) {
+void Scope::loadStored(OperationContext* opCtx, bool ignoreNotConnected) {
if (_localDBName.size() == 0) {
if (ignoreNotConnected)
return;
@@ -216,7 +216,7 @@ void Scope::loadStored(OperationContext* txn, bool ignoreNotConnected) {
_loadedVersion = lastVersion;
string coll = _localDBName + ".system.js";
- auto directDBClient = DBDirectClientFactory::get(txn).create(txn);
+ auto directDBClient = DBDirectClientFactory::get(opCtx).create(opCtx);
unique_ptr<DBClientCursor> c =
directDBClient->query(coll, Query(), 0, 0, NULL, QueryOption_SlaveOk, 0);
@@ -344,7 +344,7 @@ public:
_pools.push_front(toStore);
}
- std::shared_ptr<Scope> tryAcquire(OperationContext* txn, const string& poolName) {
+ std::shared_ptr<Scope> tryAcquire(OperationContext* opCtx, const string& poolName) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
for (Pools::iterator it = _pools.begin(); it != _pools.end(); ++it) {
@@ -353,7 +353,7 @@ public:
_pools.erase(it);
scope->incTimesUsed();
scope->reset();
- scope->registerOperation(txn);
+ scope->registerOperation(opCtx);
return scope;
}
}
@@ -402,8 +402,8 @@ public:
void reset() {
_real->reset();
}
- void registerOperation(OperationContext* txn) {
- _real->registerOperation(txn);
+ void registerOperation(OperationContext* opCtx) {
+ _real->registerOperation(opCtx);
}
void unregisterOperation() {
_real->unregisterOperation();
@@ -411,14 +411,14 @@ public:
void init(const BSONObj* data) {
_real->init(data);
}
- void localConnectForDbEval(OperationContext* txn, const char* dbName) {
+ void localConnectForDbEval(OperationContext* opCtx, const char* dbName) {
invariant(!"localConnectForDbEval should only be called from dbEval");
}
void setLocalDB(const string& dbName) {
_real->setLocalDB(dbName);
}
- void loadStored(OperationContext* txn, bool ignoreNotConnected = false) {
- _real->loadStored(txn, ignoreNotConnected);
+ void loadStored(OperationContext* opCtx, bool ignoreNotConnected = false) {
+ _real->loadStored(opCtx, ignoreNotConnected);
}
void externalSetup() {
_real->externalSetup();
@@ -532,20 +532,20 @@ private:
};
/** Get a scope from the pool of scopes matching the supplied pool name */
-unique_ptr<Scope> ScriptEngine::getPooledScope(OperationContext* txn,
+unique_ptr<Scope> ScriptEngine::getPooledScope(OperationContext* opCtx,
const string& db,
const string& scopeType) {
const string fullPoolName = db + scopeType;
- std::shared_ptr<Scope> s = scopeCache.tryAcquire(txn, fullPoolName);
+ std::shared_ptr<Scope> s = scopeCache.tryAcquire(opCtx, fullPoolName);
if (!s) {
s.reset(newScope());
- s->registerOperation(txn);
+ s->registerOperation(opCtx);
}
unique_ptr<Scope> p;
p.reset(new PooledScope(fullPoolName, s));
p->setLocalDB(db);
- p->loadStored(txn, true);
+ p->loadStored(opCtx, true);
return p;
}
diff --git a/src/mongo/scripting/engine.h b/src/mongo/scripting/engine.h
index 40633a81afe..80d4f35f654 100644
--- a/src/mongo/scripting/engine.h
+++ b/src/mongo/scripting/engine.h
@@ -56,7 +56,7 @@ public:
virtual void reset() = 0;
virtual void init(const BSONObj* data) = 0;
- virtual void registerOperation(OperationContext* txn) = 0;
+ virtual void registerOperation(OperationContext* opCtx) = 0;
virtual void unregisterOperation() = 0;
void init(const char* data) {
@@ -64,7 +64,7 @@ public:
init(&o);
}
- virtual void localConnectForDbEval(OperationContext* txn, const char* dbName) = 0;
+ virtual void localConnectForDbEval(OperationContext* opCtx, const char* dbName) = 0;
virtual void externalSetup() = 0;
virtual void setLocalDB(const std::string& localDBName) {
_localDBName = localDBName;
@@ -161,13 +161,13 @@ public:
void execCoreFiles();
- virtual void loadStored(OperationContext* txn, bool ignoreNotConnected = false);
+ virtual void loadStored(OperationContext* opCtx, bool ignoreNotConnected = false);
/**
* if any changes are made to .system.js, call this
* right now its just global - slightly inefficient, but a lot simpler
*/
- static void storedFuncMod(OperationContext* txn);
+ static void storedFuncMod(OperationContext* opCtx);
static void validateObjectIdString(const std::string& str);
@@ -261,7 +261,7 @@ public:
* This must include authenticated users.
* @return the scope
*/
- std::unique_ptr<Scope> getPooledScope(OperationContext* txn,
+ std::unique_ptr<Scope> getPooledScope(OperationContext* opCtx,
const std::string& db,
const std::string& scopeType);
diff --git a/src/mongo/scripting/mozjs/engine.cpp b/src/mongo/scripting/mozjs/engine.cpp
index f67ba8e161b..da842847ec6 100644
--- a/src/mongo/scripting/mozjs/engine.cpp
+++ b/src/mongo/scripting/mozjs/engine.cpp
@@ -147,15 +147,15 @@ void MozJSScriptEngine::setJSHeapLimitMB(int limit) {
jsHeapLimitMB.store(limit);
}
-void MozJSScriptEngine::registerOperation(OperationContext* txn, MozJSImplScope* scope) {
+void MozJSScriptEngine::registerOperation(OperationContext* opCtx, MozJSImplScope* scope) {
stdx::lock_guard<stdx::mutex> giLock(_globalInterruptLock);
- auto opId = txn->getOpID();
+ auto opId = opCtx->getOpID();
_opToScopeMap[opId] = scope;
LOG(2) << "SMScope " << static_cast<const void*>(scope) << " registered for op " << opId;
- Status status = txn->checkForInterruptNoAssert();
+ Status status = opCtx->checkForInterruptNoAssert();
if (!status.isOK()) {
scope->kill();
}
diff --git a/src/mongo/scripting/mozjs/implscope.cpp b/src/mongo/scripting/mozjs/implscope.cpp
index d660149ae2d..eb393f3703d 100644
--- a/src/mongo/scripting/mozjs/implscope.cpp
+++ b/src/mongo/scripting/mozjs/implscope.cpp
@@ -157,18 +157,18 @@ std::string MozJSImplScope::getError() {
return "";
}
-void MozJSImplScope::registerOperation(OperationContext* txn) {
+void MozJSImplScope::registerOperation(OperationContext* opCtx) {
invariant(_opCtx == nullptr);
// getPooledScope may call registerOperation with a nullptr, so we have to
// check for that here.
- if (!txn)
+ if (!opCtx)
return;
- _opCtx = txn;
- _opId = txn->getOpID();
+ _opCtx = opCtx;
+ _opId = opCtx->getOpID();
- _engine->registerOperation(txn, this);
+ _engine->registerOperation(opCtx, this);
}
void MozJSImplScope::unregisterOperation() {
@@ -751,7 +751,7 @@ void MozJSImplScope::gc() {
JS_RequestInterruptCallback(_runtime);
}
-void MozJSImplScope::localConnectForDbEval(OperationContext* txn, const char* dbName) {
+void MozJSImplScope::localConnectForDbEval(OperationContext* opCtx, const char* dbName) {
MozJSEntry entry(this);
if (_connectState == ConnectState::External)
@@ -782,7 +782,7 @@ void MozJSImplScope::localConnectForDbEval(OperationContext* txn, const char* db
_connectState = ConnectState::Local;
_localDBName = dbName;
- loadStored(txn);
+ loadStored(opCtx);
}
void MozJSImplScope::externalSetup() {
diff --git a/src/mongo/scripting/mozjs/implscope.h b/src/mongo/scripting/mozjs/implscope.h
index f6d90d1678e..02667c8b388 100644
--- a/src/mongo/scripting/mozjs/implscope.h
+++ b/src/mongo/scripting/mozjs/implscope.h
@@ -96,11 +96,11 @@ public:
OperationContext* getOpContext() const;
- void registerOperation(OperationContext* txn) override;
+ void registerOperation(OperationContext* opCtx) override;
void unregisterOperation() override;
- void localConnectForDbEval(OperationContext* txn, const char* dbName) override;
+ void localConnectForDbEval(OperationContext* opCtx, const char* dbName) override;
void externalSetup() override;
diff --git a/src/mongo/scripting/mozjs/mongo.cpp b/src/mongo/scripting/mozjs/mongo.cpp
index 68d6b05719f..ac2bf2832f5 100644
--- a/src/mongo/scripting/mozjs/mongo.cpp
+++ b/src/mongo/scripting/mozjs/mongo.cpp
@@ -677,8 +677,8 @@ void MongoLocalInfo::construct(JSContext* cx, JS::CallArgs args) {
if (args.length() != 0)
uasserted(ErrorCodes::BadValue, "local Mongo constructor takes no args");
- auto txn = scope->getOpContext();
- auto conn = DBDirectClientFactory::get(txn).create(txn);
+ auto opCtx = scope->getOpContext();
+ auto conn = DBDirectClientFactory::get(opCtx).create(opCtx);
JS::RootedObject thisv(cx);
scope->getProto<MongoLocalInfo>().newObject(&thisv);
diff --git a/src/mongo/scripting/mozjs/proxyscope.cpp b/src/mongo/scripting/mozjs/proxyscope.cpp
index cf41e84091c..2b9db2045c2 100644
--- a/src/mongo/scripting/mozjs/proxyscope.cpp
+++ b/src/mongo/scripting/mozjs/proxyscope.cpp
@@ -85,16 +85,16 @@ bool MozJSProxyScope::isKillPending() const {
return _implScope->isKillPending();
}
-void MozJSProxyScope::registerOperation(OperationContext* txn) {
- run([&] { _implScope->registerOperation(txn); });
+void MozJSProxyScope::registerOperation(OperationContext* opCtx) {
+ run([&] { _implScope->registerOperation(opCtx); });
}
void MozJSProxyScope::unregisterOperation() {
run([&] { _implScope->unregisterOperation(); });
}
-void MozJSProxyScope::localConnectForDbEval(OperationContext* txn, const char* dbName) {
- run([&] { _implScope->localConnectForDbEval(txn, dbName); });
+void MozJSProxyScope::localConnectForDbEval(OperationContext* opCtx, const char* dbName) {
+ run([&] { _implScope->localConnectForDbEval(opCtx, dbName); });
}
void MozJSProxyScope::externalSetup() {
diff --git a/src/mongo/scripting/mozjs/proxyscope.h b/src/mongo/scripting/mozjs/proxyscope.h
index d19e24149bd..39f3fbefc19 100644
--- a/src/mongo/scripting/mozjs/proxyscope.h
+++ b/src/mongo/scripting/mozjs/proxyscope.h
@@ -113,11 +113,11 @@ public:
bool isKillPending() const override;
- void registerOperation(OperationContext* txn) override;
+ void registerOperation(OperationContext* opCtx) override;
void unregisterOperation() override;
- void localConnectForDbEval(OperationContext* txn, const char* dbName) override;
+ void localConnectForDbEval(OperationContext* opCtx, const char* dbName) override;
void externalSetup() override;
diff --git a/src/mongo/util/admin_access.h b/src/mongo/util/admin_access.h
index 721aeb2db23..f6a9821077c 100644
--- a/src/mongo/util/admin_access.h
+++ b/src/mongo/util/admin_access.h
@@ -50,14 +50,14 @@ public:
/** @return if there are any priviledge users. This should not
* block for long and throw if can't get a lock if needed.
*/
- virtual bool haveAdminUsers(OperationContext* txn) const = 0;
+ virtual bool haveAdminUsers(OperationContext* opCtx) const = 0;
};
class NoAdminAccess : public AdminAccess {
public:
virtual ~NoAdminAccess() {}
- virtual bool haveAdminUsers(OperationContext* txn) const {
+ virtual bool haveAdminUsers(OperationContext* opCtx) const {
return false;
}
};
diff --git a/src/mongo/util/concurrency/notification.h b/src/mongo/util/concurrency/notification.h
index 32ce9caa068..091dca02a60 100644
--- a/src/mongo/util/concurrency/notification.h
+++ b/src/mongo/util/concurrency/notification.h
@@ -66,9 +66,9 @@ public:
* If the notification has been set, returns immediately. Otherwise blocks until it becomes set.
* If the wait is interrupted, throws an exception.
*/
- T& get(OperationContext* txn) {
+ T& get(OperationContext* opCtx) {
stdx::unique_lock<stdx::mutex> lock(_mutex);
- txn->waitForConditionOrInterrupt(_condVar, lock, [this]() -> bool { return !!_value; });
+ opCtx->waitForConditionOrInterrupt(_condVar, lock, [this]() -> bool { return !!_value; });
return _value.get();
}
@@ -100,7 +100,7 @@ public:
* If the notification is not set, blocks either until it becomes set or until the waitTimeout
* expires. If the wait is interrupted, throws an exception. Otherwise, returns immediately.
*/
- bool waitFor(OperationContext* txn, Microseconds waitTimeout) {
+ bool waitFor(OperationContext* opCtx, Microseconds waitTimeout) {
const auto waitDeadline = Date_t::now() + waitTimeout;
stdx::unique_lock<stdx::mutex> lock(_mutex);
@@ -123,8 +123,8 @@ public:
return _notification.operator bool();
}
- void get(OperationContext* txn) {
- _notification.get(txn);
+ void get(OperationContext* opCtx) {
+ _notification.get(opCtx);
}
void get() {
@@ -135,8 +135,8 @@ public:
_notification.set(true);
}
- bool waitFor(OperationContext* txn, Microseconds waitTimeout) {
- return _notification.waitFor(txn, waitTimeout);
+ bool waitFor(OperationContext* opCtx, Microseconds waitTimeout) {
+ return _notification.waitFor(opCtx, waitTimeout);
}
private:
diff --git a/src/mongo/util/fail_point_server_parameter.cpp b/src/mongo/util/fail_point_server_parameter.cpp
index 3e31122dfa3..9b881cccb42 100644
--- a/src/mongo/util/fail_point_server_parameter.cpp
+++ b/src/mongo/util/fail_point_server_parameter.cpp
@@ -48,7 +48,7 @@ FailPointServerParameter::FailPointServerParameter(std::string name, FailPoint*
_failpoint(failpoint),
_failPointName(name) {}
-void FailPointServerParameter::append(OperationContext* txn,
+void FailPointServerParameter::append(OperationContext* opCtx,
BSONObjBuilder& b,
const std::string& name) {
b << name << _failpoint->toBSON();
diff --git a/src/mongo/util/fail_point_server_parameter.h b/src/mongo/util/fail_point_server_parameter.h
index 3a92966c79b..cf0ca5b8ed3 100644
--- a/src/mongo/util/fail_point_server_parameter.h
+++ b/src/mongo/util/fail_point_server_parameter.h
@@ -42,7 +42,7 @@ public:
FailPointServerParameter(std::string name, FailPoint* failpoint);
- void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) override;
+ void append(OperationContext* opCtx, BSONObjBuilder& b, const std::string& name) override;
Status set(const BSONElement& newValueElement) override;
Status setFromString(const std::string& str) override;
diff --git a/src/mongo/util/heap_profiler.cpp b/src/mongo/util/heap_profiler.cpp
index 8b1de2a0dd5..de2024ce2db 100644
--- a/src/mongo/util/heap_profiler.cpp
+++ b/src/mongo/util/heap_profiler.cpp
@@ -649,7 +649,7 @@ public:
return HeapProfiler::enabledParameter;
}
- BSONObj generateSection(OperationContext* txn,
+ BSONObj generateSection(OperationContext* opCtx,
const BSONElement& configElement) const override {
BSONObjBuilder builder;
HeapProfiler::generateServerStatusSection(builder);
diff --git a/src/mongo/util/progress_meter.h b/src/mongo/util/progress_meter.h
index 3dca5350692..b3e731fb52d 100644
--- a/src/mongo/util/progress_meter.h
+++ b/src/mongo/util/progress_meter.h
@@ -126,7 +126,7 @@ private:
};
// e.g.:
-// CurOp * op = CurOp::get(txn);
+// CurOp * op = CurOp::get(opCtx);
// ProgressMeterHolder pm(op->setMessage("index: (1/3) external sort",
// "Index: External Sort Progress", d->stats.nrecords, 10)); loop { pm.hit(); }
class ProgressMeterHolder {
diff --git a/src/mongo/util/tcmalloc_server_status_section.cpp b/src/mongo/util/tcmalloc_server_status_section.cpp
index 303076ba784..21159e43754 100644
--- a/src/mongo/util/tcmalloc_server_status_section.cpp
+++ b/src/mongo/util/tcmalloc_server_status_section.cpp
@@ -100,7 +100,8 @@ public:
return true;
}
- virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ virtual BSONObj generateSection(OperationContext* opCtx,
+ const BSONElement& configElement) const {
long long verbosity = 1;
if (configElement) {
// Relies on the fact that safeNumberLong turns non-numbers into 0.
diff --git a/src/mongo/util/tcmalloc_set_parameter.cpp b/src/mongo/util/tcmalloc_set_parameter.cpp
index 130f77535cf..ca9cf7eb108 100644
--- a/src/mongo/util/tcmalloc_set_parameter.cpp
+++ b/src/mongo/util/tcmalloc_set_parameter.cpp
@@ -55,7 +55,7 @@ public:
explicit TcmallocNumericPropertyServerParameter(const std::string& serverParameterName,
const std::string& tcmallocPropertyName);
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name);
+ virtual void append(OperationContext* opCtx, BSONObjBuilder& b, const std::string& name);
virtual Status set(const BSONElement& newValueElement);
virtual Status setFromString(const std::string& str);
@@ -71,7 +71,7 @@ TcmallocNumericPropertyServerParameter::TcmallocNumericPropertyServerParameter(
true /* change at runtime */),
_tcmallocPropertyName(tcmallocPropertyName) {}
-void TcmallocNumericPropertyServerParameter::append(OperationContext* txn,
+void TcmallocNumericPropertyServerParameter::append(OperationContext* opCtx,
BSONObjBuilder& b,
const std::string& name) {
size_t value;